1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * derived from drivers/kvm/kvm_main.c 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright (C) 2008 Qumranet, Inc. 9 * Copyright IBM Corporation, 2008 10 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 11 * 12 * Authors: 13 * Avi Kivity <avi@qumranet.com> 14 * Yaniv Kamay <yaniv@qumranet.com> 15 * Amit Shah <amit.shah@qumranet.com> 16 * Ben-Ami Yassour <benami@il.ibm.com> 17 */ 18 19 #include <linux/kvm_host.h> 20 #include "irq.h" 21 #include "ioapic.h" 22 #include "mmu.h" 23 #include "i8254.h" 24 #include "tss.h" 25 #include "kvm_cache_regs.h" 26 #include "kvm_emulate.h" 27 #include "x86.h" 28 #include "cpuid.h" 29 #include "pmu.h" 30 #include "hyperv.h" 31 #include "lapic.h" 32 #include "xen.h" 33 #include "smm.h" 34 35 #include <linux/clocksource.h> 36 #include <linux/interrupt.h> 37 #include <linux/kvm.h> 38 #include <linux/fs.h> 39 #include <linux/vmalloc.h> 40 #include <linux/export.h> 41 #include <linux/moduleparam.h> 42 #include <linux/mman.h> 43 #include <linux/highmem.h> 44 #include <linux/iommu.h> 45 #include <linux/cpufreq.h> 46 #include <linux/user-return-notifier.h> 47 #include <linux/srcu.h> 48 #include <linux/slab.h> 49 #include <linux/perf_event.h> 50 #include <linux/uaccess.h> 51 #include <linux/hash.h> 52 #include <linux/pci.h> 53 #include <linux/timekeeper_internal.h> 54 #include <linux/pvclock_gtod.h> 55 #include <linux/kvm_irqfd.h> 56 #include <linux/irqbypass.h> 57 #include <linux/sched/stat.h> 58 #include <linux/sched/isolation.h> 59 #include <linux/mem_encrypt.h> 60 #include <linux/entry-kvm.h> 61 #include <linux/suspend.h> 62 63 #include <trace/events/kvm.h> 64 65 #include <asm/debugreg.h> 66 #include <asm/msr.h> 67 #include <asm/desc.h> 68 #include <asm/mce.h> 69 #include <asm/pkru.h> 70 #include <linux/kernel_stat.h> 71 #include <asm/fpu/api.h> 72 #include <asm/fpu/xcr.h> 73 #include <asm/fpu/xstate.h> 74 #include <asm/pvclock.h> 75 #include <asm/div64.h> 76 #include <asm/irq_remapping.h> 77 #include <asm/mshyperv.h> 78 #include <asm/hypervisor.h> 79 #include <asm/tlbflush.h> 80 #include <asm/intel_pt.h> 81 #include <asm/emulate_prefix.h> 82 #include <asm/sgx.h> 83 #include <clocksource/hyperv_timer.h> 84 85 #define CREATE_TRACE_POINTS 86 #include "trace.h" 87 88 #define MAX_IO_MSRS 256 89 #define KVM_MAX_MCE_BANKS 32 90 91 struct kvm_caps kvm_caps __read_mostly = { 92 .supported_mce_cap = MCG_CTL_P | MCG_SER_P, 93 }; 94 EXPORT_SYMBOL_GPL(kvm_caps); 95 96 #define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e)) 97 98 #define emul_to_vcpu(ctxt) \ 99 ((struct kvm_vcpu *)(ctxt)->vcpu) 100 101 /* EFER defaults: 102 * - enable syscall per default because its emulated by KVM 103 * - enable LME and LMA per default on 64 bit KVM 104 */ 105 #ifdef CONFIG_X86_64 106 static 107 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); 108 #else 109 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); 110 #endif 111 112 static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS; 113 114 #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE) 115 116 #define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE 117 118 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \ 119 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) 120 121 static void update_cr8_intercept(struct kvm_vcpu *vcpu); 122 static void process_nmi(struct kvm_vcpu *vcpu); 123 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); 124 static void store_regs(struct kvm_vcpu *vcpu); 125 static int sync_regs(struct kvm_vcpu *vcpu); 126 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu); 127 128 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2); 129 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2); 130 131 struct kvm_x86_ops kvm_x86_ops __read_mostly; 132 133 #define KVM_X86_OP(func) \ 134 DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \ 135 *(((struct kvm_x86_ops *)0)->func)); 136 #define KVM_X86_OP_OPTIONAL KVM_X86_OP 137 #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP 138 #include <asm/kvm-x86-ops.h> 139 EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits); 140 EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg); 141 142 static bool __read_mostly ignore_msrs = 0; 143 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); 144 145 bool __read_mostly report_ignored_msrs = true; 146 module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR); 147 EXPORT_SYMBOL_GPL(report_ignored_msrs); 148 149 unsigned int min_timer_period_us = 200; 150 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); 151 152 static bool __read_mostly kvmclock_periodic_sync = true; 153 module_param(kvmclock_periodic_sync, bool, S_IRUGO); 154 155 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ 156 static u32 __read_mostly tsc_tolerance_ppm = 250; 157 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); 158 159 /* 160 * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables 161 * adaptive tuning starting from default advancement of 1000ns. '0' disables 162 * advancement entirely. Any other value is used as-is and disables adaptive 163 * tuning, i.e. allows privileged userspace to set an exact advancement time. 164 */ 165 static int __read_mostly lapic_timer_advance_ns = -1; 166 module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR); 167 168 static bool __read_mostly vector_hashing = true; 169 module_param(vector_hashing, bool, S_IRUGO); 170 171 bool __read_mostly enable_vmware_backdoor = false; 172 module_param(enable_vmware_backdoor, bool, S_IRUGO); 173 EXPORT_SYMBOL_GPL(enable_vmware_backdoor); 174 175 /* 176 * Flags to manipulate forced emulation behavior (any non-zero value will 177 * enable forced emulation). 178 */ 179 #define KVM_FEP_CLEAR_RFLAGS_RF BIT(1) 180 static int __read_mostly force_emulation_prefix; 181 module_param(force_emulation_prefix, int, 0644); 182 183 int __read_mostly pi_inject_timer = -1; 184 module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR); 185 186 /* Enable/disable PMU virtualization */ 187 bool __read_mostly enable_pmu = true; 188 EXPORT_SYMBOL_GPL(enable_pmu); 189 module_param(enable_pmu, bool, 0444); 190 191 bool __read_mostly eager_page_split = true; 192 module_param(eager_page_split, bool, 0644); 193 194 /* 195 * Restoring the host value for MSRs that are only consumed when running in 196 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU 197 * returns to userspace, i.e. the kernel can run with the guest's value. 198 */ 199 #define KVM_MAX_NR_USER_RETURN_MSRS 16 200 201 struct kvm_user_return_msrs { 202 struct user_return_notifier urn; 203 bool registered; 204 struct kvm_user_return_msr_values { 205 u64 host; 206 u64 curr; 207 } values[KVM_MAX_NR_USER_RETURN_MSRS]; 208 }; 209 210 u32 __read_mostly kvm_nr_uret_msrs; 211 EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs); 212 static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS]; 213 static struct kvm_user_return_msrs __percpu *user_return_msrs; 214 215 #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ 216 | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ 217 | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \ 218 | XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE) 219 220 u64 __read_mostly host_efer; 221 EXPORT_SYMBOL_GPL(host_efer); 222 223 bool __read_mostly allow_smaller_maxphyaddr = 0; 224 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr); 225 226 bool __read_mostly enable_apicv = true; 227 EXPORT_SYMBOL_GPL(enable_apicv); 228 229 u64 __read_mostly host_xss; 230 EXPORT_SYMBOL_GPL(host_xss); 231 232 const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 233 KVM_GENERIC_VM_STATS(), 234 STATS_DESC_COUNTER(VM, mmu_shadow_zapped), 235 STATS_DESC_COUNTER(VM, mmu_pte_write), 236 STATS_DESC_COUNTER(VM, mmu_pde_zapped), 237 STATS_DESC_COUNTER(VM, mmu_flooded), 238 STATS_DESC_COUNTER(VM, mmu_recycled), 239 STATS_DESC_COUNTER(VM, mmu_cache_miss), 240 STATS_DESC_ICOUNTER(VM, mmu_unsync), 241 STATS_DESC_ICOUNTER(VM, pages_4k), 242 STATS_DESC_ICOUNTER(VM, pages_2m), 243 STATS_DESC_ICOUNTER(VM, pages_1g), 244 STATS_DESC_ICOUNTER(VM, nx_lpage_splits), 245 STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size), 246 STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions) 247 }; 248 249 const struct kvm_stats_header kvm_vm_stats_header = { 250 .name_size = KVM_STATS_NAME_SIZE, 251 .num_desc = ARRAY_SIZE(kvm_vm_stats_desc), 252 .id_offset = sizeof(struct kvm_stats_header), 253 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, 254 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + 255 sizeof(kvm_vm_stats_desc), 256 }; 257 258 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 259 KVM_GENERIC_VCPU_STATS(), 260 STATS_DESC_COUNTER(VCPU, pf_taken), 261 STATS_DESC_COUNTER(VCPU, pf_fixed), 262 STATS_DESC_COUNTER(VCPU, pf_emulate), 263 STATS_DESC_COUNTER(VCPU, pf_spurious), 264 STATS_DESC_COUNTER(VCPU, pf_fast), 265 STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created), 266 STATS_DESC_COUNTER(VCPU, pf_guest), 267 STATS_DESC_COUNTER(VCPU, tlb_flush), 268 STATS_DESC_COUNTER(VCPU, invlpg), 269 STATS_DESC_COUNTER(VCPU, exits), 270 STATS_DESC_COUNTER(VCPU, io_exits), 271 STATS_DESC_COUNTER(VCPU, mmio_exits), 272 STATS_DESC_COUNTER(VCPU, signal_exits), 273 STATS_DESC_COUNTER(VCPU, irq_window_exits), 274 STATS_DESC_COUNTER(VCPU, nmi_window_exits), 275 STATS_DESC_COUNTER(VCPU, l1d_flush), 276 STATS_DESC_COUNTER(VCPU, halt_exits), 277 STATS_DESC_COUNTER(VCPU, request_irq_exits), 278 STATS_DESC_COUNTER(VCPU, irq_exits), 279 STATS_DESC_COUNTER(VCPU, host_state_reload), 280 STATS_DESC_COUNTER(VCPU, fpu_reload), 281 STATS_DESC_COUNTER(VCPU, insn_emulation), 282 STATS_DESC_COUNTER(VCPU, insn_emulation_fail), 283 STATS_DESC_COUNTER(VCPU, hypercalls), 284 STATS_DESC_COUNTER(VCPU, irq_injections), 285 STATS_DESC_COUNTER(VCPU, nmi_injections), 286 STATS_DESC_COUNTER(VCPU, req_event), 287 STATS_DESC_COUNTER(VCPU, nested_run), 288 STATS_DESC_COUNTER(VCPU, directed_yield_attempted), 289 STATS_DESC_COUNTER(VCPU, directed_yield_successful), 290 STATS_DESC_COUNTER(VCPU, preemption_reported), 291 STATS_DESC_COUNTER(VCPU, preemption_other), 292 STATS_DESC_IBOOLEAN(VCPU, guest_mode), 293 STATS_DESC_COUNTER(VCPU, notify_window_exits), 294 }; 295 296 const struct kvm_stats_header kvm_vcpu_stats_header = { 297 .name_size = KVM_STATS_NAME_SIZE, 298 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), 299 .id_offset = sizeof(struct kvm_stats_header), 300 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, 301 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + 302 sizeof(kvm_vcpu_stats_desc), 303 }; 304 305 u64 __read_mostly host_xcr0; 306 307 static struct kmem_cache *x86_emulator_cache; 308 309 /* 310 * When called, it means the previous get/set msr reached an invalid msr. 311 * Return true if we want to ignore/silent this failed msr access. 312 */ 313 static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write) 314 { 315 const char *op = write ? "wrmsr" : "rdmsr"; 316 317 if (ignore_msrs) { 318 if (report_ignored_msrs) 319 kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", 320 op, msr, data); 321 /* Mask the error */ 322 return true; 323 } else { 324 kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n", 325 op, msr, data); 326 return false; 327 } 328 } 329 330 static struct kmem_cache *kvm_alloc_emulator_cache(void) 331 { 332 unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src); 333 unsigned int size = sizeof(struct x86_emulate_ctxt); 334 335 return kmem_cache_create_usercopy("x86_emulator", size, 336 __alignof__(struct x86_emulate_ctxt), 337 SLAB_ACCOUNT, useroffset, 338 size - useroffset, NULL); 339 } 340 341 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); 342 343 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) 344 { 345 int i; 346 for (i = 0; i < ASYNC_PF_PER_VCPU; i++) 347 vcpu->arch.apf.gfns[i] = ~0; 348 } 349 350 static void kvm_on_user_return(struct user_return_notifier *urn) 351 { 352 unsigned slot; 353 struct kvm_user_return_msrs *msrs 354 = container_of(urn, struct kvm_user_return_msrs, urn); 355 struct kvm_user_return_msr_values *values; 356 unsigned long flags; 357 358 /* 359 * Disabling irqs at this point since the following code could be 360 * interrupted and executed through kvm_arch_hardware_disable() 361 */ 362 local_irq_save(flags); 363 if (msrs->registered) { 364 msrs->registered = false; 365 user_return_notifier_unregister(urn); 366 } 367 local_irq_restore(flags); 368 for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) { 369 values = &msrs->values[slot]; 370 if (values->host != values->curr) { 371 wrmsrl(kvm_uret_msrs_list[slot], values->host); 372 values->curr = values->host; 373 } 374 } 375 } 376 377 static int kvm_probe_user_return_msr(u32 msr) 378 { 379 u64 val; 380 int ret; 381 382 preempt_disable(); 383 ret = rdmsrl_safe(msr, &val); 384 if (ret) 385 goto out; 386 ret = wrmsrl_safe(msr, val); 387 out: 388 preempt_enable(); 389 return ret; 390 } 391 392 int kvm_add_user_return_msr(u32 msr) 393 { 394 BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS); 395 396 if (kvm_probe_user_return_msr(msr)) 397 return -1; 398 399 kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr; 400 return kvm_nr_uret_msrs++; 401 } 402 EXPORT_SYMBOL_GPL(kvm_add_user_return_msr); 403 404 int kvm_find_user_return_msr(u32 msr) 405 { 406 int i; 407 408 for (i = 0; i < kvm_nr_uret_msrs; ++i) { 409 if (kvm_uret_msrs_list[i] == msr) 410 return i; 411 } 412 return -1; 413 } 414 EXPORT_SYMBOL_GPL(kvm_find_user_return_msr); 415 416 static void kvm_user_return_msr_cpu_online(void) 417 { 418 unsigned int cpu = smp_processor_id(); 419 struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu); 420 u64 value; 421 int i; 422 423 for (i = 0; i < kvm_nr_uret_msrs; ++i) { 424 rdmsrl_safe(kvm_uret_msrs_list[i], &value); 425 msrs->values[i].host = value; 426 msrs->values[i].curr = value; 427 } 428 } 429 430 int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask) 431 { 432 unsigned int cpu = smp_processor_id(); 433 struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu); 434 int err; 435 436 value = (value & mask) | (msrs->values[slot].host & ~mask); 437 if (value == msrs->values[slot].curr) 438 return 0; 439 err = wrmsrl_safe(kvm_uret_msrs_list[slot], value); 440 if (err) 441 return 1; 442 443 msrs->values[slot].curr = value; 444 if (!msrs->registered) { 445 msrs->urn.on_user_return = kvm_on_user_return; 446 user_return_notifier_register(&msrs->urn); 447 msrs->registered = true; 448 } 449 return 0; 450 } 451 EXPORT_SYMBOL_GPL(kvm_set_user_return_msr); 452 453 static void drop_user_return_notifiers(void) 454 { 455 unsigned int cpu = smp_processor_id(); 456 struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu); 457 458 if (msrs->registered) 459 kvm_on_user_return(&msrs->urn); 460 } 461 462 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) 463 { 464 return vcpu->arch.apic_base; 465 } 466 EXPORT_SYMBOL_GPL(kvm_get_apic_base); 467 468 enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu) 469 { 470 return kvm_apic_mode(kvm_get_apic_base(vcpu)); 471 } 472 EXPORT_SYMBOL_GPL(kvm_get_apic_mode); 473 474 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 475 { 476 enum lapic_mode old_mode = kvm_get_apic_mode(vcpu); 477 enum lapic_mode new_mode = kvm_apic_mode(msr_info->data); 478 u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff | 479 (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE); 480 481 if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID) 482 return 1; 483 if (!msr_info->host_initiated) { 484 if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC) 485 return 1; 486 if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC) 487 return 1; 488 } 489 490 kvm_lapic_set_base(vcpu, msr_info->data); 491 kvm_recalculate_apic_map(vcpu->kvm); 492 return 0; 493 } 494 EXPORT_SYMBOL_GPL(kvm_set_apic_base); 495 496 /* 497 * Handle a fault on a hardware virtualization (VMX or SVM) instruction. 498 * 499 * Hardware virtualization extension instructions may fault if a reboot turns 500 * off virtualization while processes are running. Usually after catching the 501 * fault we just panic; during reboot instead the instruction is ignored. 502 */ 503 noinstr void kvm_spurious_fault(void) 504 { 505 /* Fault while not rebooting. We want the trace. */ 506 BUG_ON(!kvm_rebooting); 507 } 508 EXPORT_SYMBOL_GPL(kvm_spurious_fault); 509 510 #define EXCPT_BENIGN 0 511 #define EXCPT_CONTRIBUTORY 1 512 #define EXCPT_PF 2 513 514 static int exception_class(int vector) 515 { 516 switch (vector) { 517 case PF_VECTOR: 518 return EXCPT_PF; 519 case DE_VECTOR: 520 case TS_VECTOR: 521 case NP_VECTOR: 522 case SS_VECTOR: 523 case GP_VECTOR: 524 return EXCPT_CONTRIBUTORY; 525 default: 526 break; 527 } 528 return EXCPT_BENIGN; 529 } 530 531 #define EXCPT_FAULT 0 532 #define EXCPT_TRAP 1 533 #define EXCPT_ABORT 2 534 #define EXCPT_INTERRUPT 3 535 #define EXCPT_DB 4 536 537 static int exception_type(int vector) 538 { 539 unsigned int mask; 540 541 if (WARN_ON(vector > 31 || vector == NMI_VECTOR)) 542 return EXCPT_INTERRUPT; 543 544 mask = 1 << vector; 545 546 /* 547 * #DBs can be trap-like or fault-like, the caller must check other CPU 548 * state, e.g. DR6, to determine whether a #DB is a trap or fault. 549 */ 550 if (mask & (1 << DB_VECTOR)) 551 return EXCPT_DB; 552 553 if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR))) 554 return EXCPT_TRAP; 555 556 if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR))) 557 return EXCPT_ABORT; 558 559 /* Reserved exceptions will result in fault */ 560 return EXCPT_FAULT; 561 } 562 563 void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu, 564 struct kvm_queued_exception *ex) 565 { 566 if (!ex->has_payload) 567 return; 568 569 switch (ex->vector) { 570 case DB_VECTOR: 571 /* 572 * "Certain debug exceptions may clear bit 0-3. The 573 * remaining contents of the DR6 register are never 574 * cleared by the processor". 575 */ 576 vcpu->arch.dr6 &= ~DR_TRAP_BITS; 577 /* 578 * In order to reflect the #DB exception payload in guest 579 * dr6, three components need to be considered: active low 580 * bit, FIXED_1 bits and active high bits (e.g. DR6_BD, 581 * DR6_BS and DR6_BT) 582 * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits. 583 * In the target guest dr6: 584 * FIXED_1 bits should always be set. 585 * Active low bits should be cleared if 1-setting in payload. 586 * Active high bits should be set if 1-setting in payload. 587 * 588 * Note, the payload is compatible with the pending debug 589 * exceptions/exit qualification under VMX, that active_low bits 590 * are active high in payload. 591 * So they need to be flipped for DR6. 592 */ 593 vcpu->arch.dr6 |= DR6_ACTIVE_LOW; 594 vcpu->arch.dr6 |= ex->payload; 595 vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW; 596 597 /* 598 * The #DB payload is defined as compatible with the 'pending 599 * debug exceptions' field under VMX, not DR6. While bit 12 is 600 * defined in the 'pending debug exceptions' field (enabled 601 * breakpoint), it is reserved and must be zero in DR6. 602 */ 603 vcpu->arch.dr6 &= ~BIT(12); 604 break; 605 case PF_VECTOR: 606 vcpu->arch.cr2 = ex->payload; 607 break; 608 } 609 610 ex->has_payload = false; 611 ex->payload = 0; 612 } 613 EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload); 614 615 static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector, 616 bool has_error_code, u32 error_code, 617 bool has_payload, unsigned long payload) 618 { 619 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit; 620 621 ex->vector = vector; 622 ex->injected = false; 623 ex->pending = true; 624 ex->has_error_code = has_error_code; 625 ex->error_code = error_code; 626 ex->has_payload = has_payload; 627 ex->payload = payload; 628 } 629 630 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, 631 unsigned nr, bool has_error, u32 error_code, 632 bool has_payload, unsigned long payload, bool reinject) 633 { 634 u32 prev_nr; 635 int class1, class2; 636 637 kvm_make_request(KVM_REQ_EVENT, vcpu); 638 639 /* 640 * If the exception is destined for L2 and isn't being reinjected, 641 * morph it to a VM-Exit if L1 wants to intercept the exception. A 642 * previously injected exception is not checked because it was checked 643 * when it was original queued, and re-checking is incorrect if _L1_ 644 * injected the exception, in which case it's exempt from interception. 645 */ 646 if (!reinject && is_guest_mode(vcpu) && 647 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) { 648 kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code, 649 has_payload, payload); 650 return; 651 } 652 653 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { 654 queue: 655 if (reinject) { 656 /* 657 * On VM-Entry, an exception can be pending if and only 658 * if event injection was blocked by nested_run_pending. 659 * In that case, however, vcpu_enter_guest() requests an 660 * immediate exit, and the guest shouldn't proceed far 661 * enough to need reinjection. 662 */ 663 WARN_ON_ONCE(kvm_is_exception_pending(vcpu)); 664 vcpu->arch.exception.injected = true; 665 if (WARN_ON_ONCE(has_payload)) { 666 /* 667 * A reinjected event has already 668 * delivered its payload. 669 */ 670 has_payload = false; 671 payload = 0; 672 } 673 } else { 674 vcpu->arch.exception.pending = true; 675 vcpu->arch.exception.injected = false; 676 } 677 vcpu->arch.exception.has_error_code = has_error; 678 vcpu->arch.exception.vector = nr; 679 vcpu->arch.exception.error_code = error_code; 680 vcpu->arch.exception.has_payload = has_payload; 681 vcpu->arch.exception.payload = payload; 682 if (!is_guest_mode(vcpu)) 683 kvm_deliver_exception_payload(vcpu, 684 &vcpu->arch.exception); 685 return; 686 } 687 688 /* to check exception */ 689 prev_nr = vcpu->arch.exception.vector; 690 if (prev_nr == DF_VECTOR) { 691 /* triple fault -> shutdown */ 692 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 693 return; 694 } 695 class1 = exception_class(prev_nr); 696 class2 = exception_class(nr); 697 if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) || 698 (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { 699 /* 700 * Synthesize #DF. Clear the previously injected or pending 701 * exception so as not to incorrectly trigger shutdown. 702 */ 703 vcpu->arch.exception.injected = false; 704 vcpu->arch.exception.pending = false; 705 706 kvm_queue_exception_e(vcpu, DF_VECTOR, 0); 707 } else { 708 /* replace previous exception with a new one in a hope 709 that instruction re-execution will regenerate lost 710 exception */ 711 goto queue; 712 } 713 } 714 715 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) 716 { 717 kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false); 718 } 719 EXPORT_SYMBOL_GPL(kvm_queue_exception); 720 721 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) 722 { 723 kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true); 724 } 725 EXPORT_SYMBOL_GPL(kvm_requeue_exception); 726 727 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, 728 unsigned long payload) 729 { 730 kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false); 731 } 732 EXPORT_SYMBOL_GPL(kvm_queue_exception_p); 733 734 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr, 735 u32 error_code, unsigned long payload) 736 { 737 kvm_multiple_exception(vcpu, nr, true, error_code, 738 true, payload, false); 739 } 740 741 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) 742 { 743 if (err) 744 kvm_inject_gp(vcpu, 0); 745 else 746 return kvm_skip_emulated_instruction(vcpu); 747 748 return 1; 749 } 750 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); 751 752 static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err) 753 { 754 if (err) { 755 kvm_inject_gp(vcpu, 0); 756 return 1; 757 } 758 759 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP | 760 EMULTYPE_COMPLETE_USER_EXIT); 761 } 762 763 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) 764 { 765 ++vcpu->stat.pf_guest; 766 767 /* 768 * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of 769 * whether or not L1 wants to intercept "regular" #PF. 770 */ 771 if (is_guest_mode(vcpu) && fault->async_page_fault) 772 kvm_queue_exception_vmexit(vcpu, PF_VECTOR, 773 true, fault->error_code, 774 true, fault->address); 775 else 776 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, 777 fault->address); 778 } 779 EXPORT_SYMBOL_GPL(kvm_inject_page_fault); 780 781 void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, 782 struct x86_exception *fault) 783 { 784 struct kvm_mmu *fault_mmu; 785 WARN_ON_ONCE(fault->vector != PF_VECTOR); 786 787 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : 788 vcpu->arch.walk_mmu; 789 790 /* 791 * Invalidate the TLB entry for the faulting address, if it exists, 792 * else the access will fault indefinitely (and to emulate hardware). 793 */ 794 if ((fault->error_code & PFERR_PRESENT_MASK) && 795 !(fault->error_code & PFERR_RSVD_MASK)) 796 kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address, 797 fault_mmu->root.hpa); 798 799 fault_mmu->inject_page_fault(vcpu, fault); 800 } 801 EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault); 802 803 void kvm_inject_nmi(struct kvm_vcpu *vcpu) 804 { 805 atomic_inc(&vcpu->arch.nmi_queued); 806 kvm_make_request(KVM_REQ_NMI, vcpu); 807 } 808 EXPORT_SYMBOL_GPL(kvm_inject_nmi); 809 810 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) 811 { 812 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false); 813 } 814 EXPORT_SYMBOL_GPL(kvm_queue_exception_e); 815 816 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) 817 { 818 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true); 819 } 820 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); 821 822 /* 823 * Checks if cpl <= required_cpl; if true, return true. Otherwise queue 824 * a #GP and return false. 825 */ 826 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) 827 { 828 if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl) 829 return true; 830 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 831 return false; 832 } 833 EXPORT_SYMBOL_GPL(kvm_require_cpl); 834 835 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) 836 { 837 if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE)) 838 return true; 839 840 kvm_queue_exception(vcpu, UD_VECTOR); 841 return false; 842 } 843 EXPORT_SYMBOL_GPL(kvm_require_dr); 844 845 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) 846 { 847 return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); 848 } 849 850 /* 851 * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise. 852 */ 853 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) 854 { 855 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 856 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; 857 gpa_t real_gpa; 858 int i; 859 int ret; 860 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; 861 862 /* 863 * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated 864 * to an L1 GPA. 865 */ 866 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn), 867 PFERR_USER_MASK | PFERR_WRITE_MASK, NULL); 868 if (real_gpa == INVALID_GPA) 869 return 0; 870 871 /* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */ 872 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte, 873 cr3 & GENMASK(11, 5), sizeof(pdpte)); 874 if (ret < 0) 875 return 0; 876 877 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { 878 if ((pdpte[i] & PT_PRESENT_MASK) && 879 (pdpte[i] & pdptr_rsvd_bits(vcpu))) { 880 return 0; 881 } 882 } 883 884 /* 885 * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled. 886 * Shadow page roots need to be reconstructed instead. 887 */ 888 if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs))) 889 kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT); 890 891 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); 892 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); 893 kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); 894 vcpu->arch.pdptrs_from_userspace = false; 895 896 return 1; 897 } 898 EXPORT_SYMBOL_GPL(load_pdptrs); 899 900 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0) 901 { 902 if ((cr0 ^ old_cr0) & X86_CR0_PG) { 903 kvm_clear_async_pf_completion_queue(vcpu); 904 kvm_async_pf_hash_reset(vcpu); 905 906 /* 907 * Clearing CR0.PG is defined to flush the TLB from the guest's 908 * perspective. 909 */ 910 if (!(cr0 & X86_CR0_PG)) 911 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 912 } 913 914 if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS) 915 kvm_mmu_reset_context(vcpu); 916 917 if (((cr0 ^ old_cr0) & X86_CR0_CD) && 918 kvm_arch_has_noncoherent_dma(vcpu->kvm) && 919 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 920 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); 921 } 922 EXPORT_SYMBOL_GPL(kvm_post_set_cr0); 923 924 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 925 { 926 unsigned long old_cr0 = kvm_read_cr0(vcpu); 927 928 cr0 |= X86_CR0_ET; 929 930 #ifdef CONFIG_X86_64 931 if (cr0 & 0xffffffff00000000UL) 932 return 1; 933 #endif 934 935 cr0 &= ~CR0_RESERVED_BITS; 936 937 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) 938 return 1; 939 940 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) 941 return 1; 942 943 #ifdef CONFIG_X86_64 944 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && 945 (cr0 & X86_CR0_PG)) { 946 int cs_db, cs_l; 947 948 if (!is_pae(vcpu)) 949 return 1; 950 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); 951 if (cs_l) 952 return 1; 953 } 954 #endif 955 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && 956 is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) && 957 !load_pdptrs(vcpu, kvm_read_cr3(vcpu))) 958 return 1; 959 960 if (!(cr0 & X86_CR0_PG) && 961 (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))) 962 return 1; 963 964 static_call(kvm_x86_set_cr0)(vcpu, cr0); 965 966 kvm_post_set_cr0(vcpu, old_cr0, cr0); 967 968 return 0; 969 } 970 EXPORT_SYMBOL_GPL(kvm_set_cr0); 971 972 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) 973 { 974 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); 975 } 976 EXPORT_SYMBOL_GPL(kvm_lmsw); 977 978 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) 979 { 980 if (vcpu->arch.guest_state_protected) 981 return; 982 983 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { 984 985 if (vcpu->arch.xcr0 != host_xcr0) 986 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); 987 988 if (vcpu->arch.xsaves_enabled && 989 vcpu->arch.ia32_xss != host_xss) 990 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); 991 } 992 993 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS 994 if (static_cpu_has(X86_FEATURE_PKU) && 995 vcpu->arch.pkru != vcpu->arch.host_pkru && 996 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || 997 kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) 998 write_pkru(vcpu->arch.pkru); 999 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ 1000 } 1001 EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state); 1002 1003 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) 1004 { 1005 if (vcpu->arch.guest_state_protected) 1006 return; 1007 1008 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS 1009 if (static_cpu_has(X86_FEATURE_PKU) && 1010 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || 1011 kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) { 1012 vcpu->arch.pkru = rdpkru(); 1013 if (vcpu->arch.pkru != vcpu->arch.host_pkru) 1014 write_pkru(vcpu->arch.host_pkru); 1015 } 1016 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ 1017 1018 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { 1019 1020 if (vcpu->arch.xcr0 != host_xcr0) 1021 xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); 1022 1023 if (vcpu->arch.xsaves_enabled && 1024 vcpu->arch.ia32_xss != host_xss) 1025 wrmsrl(MSR_IA32_XSS, host_xss); 1026 } 1027 1028 } 1029 EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state); 1030 1031 #ifdef CONFIG_X86_64 1032 static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu) 1033 { 1034 return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC; 1035 } 1036 #endif 1037 1038 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) 1039 { 1040 u64 xcr0 = xcr; 1041 u64 old_xcr0 = vcpu->arch.xcr0; 1042 u64 valid_bits; 1043 1044 /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ 1045 if (index != XCR_XFEATURE_ENABLED_MASK) 1046 return 1; 1047 if (!(xcr0 & XFEATURE_MASK_FP)) 1048 return 1; 1049 if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE)) 1050 return 1; 1051 1052 /* 1053 * Do not allow the guest to set bits that we do not support 1054 * saving. However, xcr0 bit 0 is always set, even if the 1055 * emulated CPU does not support XSAVE (see kvm_vcpu_reset()). 1056 */ 1057 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; 1058 if (xcr0 & ~valid_bits) 1059 return 1; 1060 1061 if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) != 1062 (!(xcr0 & XFEATURE_MASK_BNDCSR))) 1063 return 1; 1064 1065 if (xcr0 & XFEATURE_MASK_AVX512) { 1066 if (!(xcr0 & XFEATURE_MASK_YMM)) 1067 return 1; 1068 if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512) 1069 return 1; 1070 } 1071 1072 if ((xcr0 & XFEATURE_MASK_XTILE) && 1073 ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE)) 1074 return 1; 1075 1076 vcpu->arch.xcr0 = xcr0; 1077 1078 if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND) 1079 kvm_update_cpuid_runtime(vcpu); 1080 return 0; 1081 } 1082 1083 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) 1084 { 1085 /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */ 1086 if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || 1087 __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { 1088 kvm_inject_gp(vcpu, 0); 1089 return 1; 1090 } 1091 1092 return kvm_skip_emulated_instruction(vcpu); 1093 } 1094 EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv); 1095 1096 bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1097 { 1098 if (cr4 & cr4_reserved_bits) 1099 return false; 1100 1101 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) 1102 return false; 1103 1104 return true; 1105 } 1106 EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4); 1107 1108 static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1109 { 1110 return __kvm_is_valid_cr4(vcpu, cr4) && 1111 static_call(kvm_x86_is_valid_cr4)(vcpu, cr4); 1112 } 1113 1114 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4) 1115 { 1116 if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) 1117 kvm_mmu_reset_context(vcpu); 1118 1119 /* 1120 * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB 1121 * according to the SDM; however, stale prev_roots could be reused 1122 * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we 1123 * free them all. This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST 1124 * or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not flushed, 1125 * so fall through. 1126 */ 1127 if (!tdp_enabled && 1128 (cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) 1129 kvm_mmu_unload(vcpu); 1130 1131 /* 1132 * The TLB has to be flushed for all PCIDs if any of the following 1133 * (architecturally required) changes happen: 1134 * - CR4.PCIDE is changed from 1 to 0 1135 * - CR4.PGE is toggled 1136 * 1137 * This is a superset of KVM_REQ_TLB_FLUSH_CURRENT. 1138 */ 1139 if (((cr4 ^ old_cr4) & X86_CR4_PGE) || 1140 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) 1141 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 1142 1143 /* 1144 * The TLB has to be flushed for the current PCID if any of the 1145 * following (architecturally required) changes happen: 1146 * - CR4.SMEP is changed from 0 to 1 1147 * - CR4.PAE is toggled 1148 */ 1149 else if (((cr4 ^ old_cr4) & X86_CR4_PAE) || 1150 ((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP))) 1151 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 1152 1153 } 1154 EXPORT_SYMBOL_GPL(kvm_post_set_cr4); 1155 1156 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1157 { 1158 unsigned long old_cr4 = kvm_read_cr4(vcpu); 1159 1160 if (!kvm_is_valid_cr4(vcpu, cr4)) 1161 return 1; 1162 1163 if (is_long_mode(vcpu)) { 1164 if (!(cr4 & X86_CR4_PAE)) 1165 return 1; 1166 if ((cr4 ^ old_cr4) & X86_CR4_LA57) 1167 return 1; 1168 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) 1169 && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS) 1170 && !load_pdptrs(vcpu, kvm_read_cr3(vcpu))) 1171 return 1; 1172 1173 if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { 1174 if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID)) 1175 return 1; 1176 1177 /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ 1178 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) 1179 return 1; 1180 } 1181 1182 static_call(kvm_x86_set_cr4)(vcpu, cr4); 1183 1184 kvm_post_set_cr4(vcpu, old_cr4, cr4); 1185 1186 return 0; 1187 } 1188 EXPORT_SYMBOL_GPL(kvm_set_cr4); 1189 1190 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) 1191 { 1192 struct kvm_mmu *mmu = vcpu->arch.mmu; 1193 unsigned long roots_to_free = 0; 1194 int i; 1195 1196 /* 1197 * MOV CR3 and INVPCID are usually not intercepted when using TDP, but 1198 * this is reachable when running EPT=1 and unrestricted_guest=0, and 1199 * also via the emulator. KVM's TDP page tables are not in the scope of 1200 * the invalidation, but the guest's TLB entries need to be flushed as 1201 * the CPU may have cached entries in its TLB for the target PCID. 1202 */ 1203 if (unlikely(tdp_enabled)) { 1204 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 1205 return; 1206 } 1207 1208 /* 1209 * If neither the current CR3 nor any of the prev_roots use the given 1210 * PCID, then nothing needs to be done here because a resync will 1211 * happen anyway before switching to any other CR3. 1212 */ 1213 if (kvm_get_active_pcid(vcpu) == pcid) { 1214 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); 1215 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 1216 } 1217 1218 /* 1219 * If PCID is disabled, there is no need to free prev_roots even if the 1220 * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB 1221 * with PCIDE=0. 1222 */ 1223 if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) 1224 return; 1225 1226 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) 1227 if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid) 1228 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 1229 1230 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); 1231 } 1232 1233 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 1234 { 1235 bool skip_tlb_flush = false; 1236 unsigned long pcid = 0; 1237 #ifdef CONFIG_X86_64 1238 bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); 1239 1240 if (pcid_enabled) { 1241 skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH; 1242 cr3 &= ~X86_CR3_PCID_NOFLUSH; 1243 pcid = cr3 & X86_CR3_PCID_MASK; 1244 } 1245 #endif 1246 1247 /* PDPTRs are always reloaded for PAE paging. */ 1248 if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu)) 1249 goto handle_tlb_flush; 1250 1251 /* 1252 * Do not condition the GPA check on long mode, this helper is used to 1253 * stuff CR3, e.g. for RSM emulation, and there is no guarantee that 1254 * the current vCPU mode is accurate. 1255 */ 1256 if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) 1257 return 1; 1258 1259 if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3)) 1260 return 1; 1261 1262 if (cr3 != kvm_read_cr3(vcpu)) 1263 kvm_mmu_new_pgd(vcpu, cr3); 1264 1265 vcpu->arch.cr3 = cr3; 1266 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 1267 /* Do not call post_set_cr3, we do not get here for confidential guests. */ 1268 1269 handle_tlb_flush: 1270 /* 1271 * A load of CR3 that flushes the TLB flushes only the current PCID, 1272 * even if PCID is disabled, in which case PCID=0 is flushed. It's a 1273 * moot point in the end because _disabling_ PCID will flush all PCIDs, 1274 * and it's impossible to use a non-zero PCID when PCID is disabled, 1275 * i.e. only PCID=0 can be relevant. 1276 */ 1277 if (!skip_tlb_flush) 1278 kvm_invalidate_pcid(vcpu, pcid); 1279 1280 return 0; 1281 } 1282 EXPORT_SYMBOL_GPL(kvm_set_cr3); 1283 1284 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) 1285 { 1286 if (cr8 & CR8_RESERVED_BITS) 1287 return 1; 1288 if (lapic_in_kernel(vcpu)) 1289 kvm_lapic_set_tpr(vcpu, cr8); 1290 else 1291 vcpu->arch.cr8 = cr8; 1292 return 0; 1293 } 1294 EXPORT_SYMBOL_GPL(kvm_set_cr8); 1295 1296 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) 1297 { 1298 if (lapic_in_kernel(vcpu)) 1299 return kvm_lapic_get_cr8(vcpu); 1300 else 1301 return vcpu->arch.cr8; 1302 } 1303 EXPORT_SYMBOL_GPL(kvm_get_cr8); 1304 1305 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) 1306 { 1307 int i; 1308 1309 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { 1310 for (i = 0; i < KVM_NR_DB_REGS; i++) 1311 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 1312 } 1313 } 1314 1315 void kvm_update_dr7(struct kvm_vcpu *vcpu) 1316 { 1317 unsigned long dr7; 1318 1319 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 1320 dr7 = vcpu->arch.guest_debug_dr7; 1321 else 1322 dr7 = vcpu->arch.dr7; 1323 static_call(kvm_x86_set_dr7)(vcpu, dr7); 1324 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; 1325 if (dr7 & DR7_BP_EN_MASK) 1326 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; 1327 } 1328 EXPORT_SYMBOL_GPL(kvm_update_dr7); 1329 1330 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) 1331 { 1332 u64 fixed = DR6_FIXED_1; 1333 1334 if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM)) 1335 fixed |= DR6_RTM; 1336 1337 if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)) 1338 fixed |= DR6_BUS_LOCK; 1339 return fixed; 1340 } 1341 1342 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) 1343 { 1344 size_t size = ARRAY_SIZE(vcpu->arch.db); 1345 1346 switch (dr) { 1347 case 0 ... 3: 1348 vcpu->arch.db[array_index_nospec(dr, size)] = val; 1349 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) 1350 vcpu->arch.eff_db[dr] = val; 1351 break; 1352 case 4: 1353 case 6: 1354 if (!kvm_dr6_valid(val)) 1355 return 1; /* #GP */ 1356 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); 1357 break; 1358 case 5: 1359 default: /* 7 */ 1360 if (!kvm_dr7_valid(val)) 1361 return 1; /* #GP */ 1362 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; 1363 kvm_update_dr7(vcpu); 1364 break; 1365 } 1366 1367 return 0; 1368 } 1369 EXPORT_SYMBOL_GPL(kvm_set_dr); 1370 1371 void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) 1372 { 1373 size_t size = ARRAY_SIZE(vcpu->arch.db); 1374 1375 switch (dr) { 1376 case 0 ... 3: 1377 *val = vcpu->arch.db[array_index_nospec(dr, size)]; 1378 break; 1379 case 4: 1380 case 6: 1381 *val = vcpu->arch.dr6; 1382 break; 1383 case 5: 1384 default: /* 7 */ 1385 *val = vcpu->arch.dr7; 1386 break; 1387 } 1388 } 1389 EXPORT_SYMBOL_GPL(kvm_get_dr); 1390 1391 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu) 1392 { 1393 u32 ecx = kvm_rcx_read(vcpu); 1394 u64 data; 1395 1396 if (kvm_pmu_rdpmc(vcpu, ecx, &data)) { 1397 kvm_inject_gp(vcpu, 0); 1398 return 1; 1399 } 1400 1401 kvm_rax_write(vcpu, (u32)data); 1402 kvm_rdx_write(vcpu, data >> 32); 1403 return kvm_skip_emulated_instruction(vcpu); 1404 } 1405 EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc); 1406 1407 /* 1408 * List of msr numbers which we expose to userspace through KVM_GET_MSRS 1409 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. 1410 * 1411 * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) 1412 * extract the supported MSRs from the related const lists. 1413 * msrs_to_save is selected from the msrs_to_save_all to reflect the 1414 * capabilities of the host cpu. This capabilities test skips MSRs that are 1415 * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs 1416 * may depend on host virtualization features rather than host cpu features. 1417 */ 1418 1419 static const u32 msrs_to_save_all[] = { 1420 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, 1421 MSR_STAR, 1422 #ifdef CONFIG_X86_64 1423 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, 1424 #endif 1425 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, 1426 MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, 1427 MSR_IA32_SPEC_CTRL, 1428 MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, 1429 MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, 1430 MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, 1431 MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B, 1432 MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, 1433 MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, 1434 MSR_IA32_UMWAIT_CONTROL, 1435 1436 MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, 1437 MSR_ARCH_PERFMON_FIXED_CTR0 + 2, 1438 MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, 1439 MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 1440 MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG, 1441 1442 /* This part of MSRs should match KVM_INTEL_PMC_MAX_GENERIC. */ 1443 MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, 1444 MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3, 1445 MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5, 1446 MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7, 1447 MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1, 1448 MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3, 1449 MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5, 1450 MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7, 1451 1452 MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3, 1453 MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3, 1454 1455 /* This part of MSRs should match KVM_AMD_PMC_MAX_GENERIC. */ 1456 MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2, 1457 MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5, 1458 MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2, 1459 MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5, 1460 1461 MSR_IA32_XFD, MSR_IA32_XFD_ERR, 1462 }; 1463 1464 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)]; 1465 static unsigned num_msrs_to_save; 1466 1467 static const u32 emulated_msrs_all[] = { 1468 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, 1469 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, 1470 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, 1471 HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, 1472 HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY, 1473 HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, 1474 HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, 1475 HV_X64_MSR_RESET, 1476 HV_X64_MSR_VP_INDEX, 1477 HV_X64_MSR_VP_RUNTIME, 1478 HV_X64_MSR_SCONTROL, 1479 HV_X64_MSR_STIMER0_CONFIG, 1480 HV_X64_MSR_VP_ASSIST_PAGE, 1481 HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL, 1482 HV_X64_MSR_TSC_EMULATION_STATUS, 1483 HV_X64_MSR_SYNDBG_OPTIONS, 1484 HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS, 1485 HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER, 1486 HV_X64_MSR_SYNDBG_PENDING_BUFFER, 1487 1488 MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, 1489 MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK, 1490 1491 MSR_IA32_TSC_ADJUST, 1492 MSR_IA32_TSC_DEADLINE, 1493 MSR_IA32_ARCH_CAPABILITIES, 1494 MSR_IA32_PERF_CAPABILITIES, 1495 MSR_IA32_MISC_ENABLE, 1496 MSR_IA32_MCG_STATUS, 1497 MSR_IA32_MCG_CTL, 1498 MSR_IA32_MCG_EXT_CTL, 1499 MSR_IA32_SMBASE, 1500 MSR_SMI_COUNT, 1501 MSR_PLATFORM_INFO, 1502 MSR_MISC_FEATURES_ENABLES, 1503 MSR_AMD64_VIRT_SPEC_CTRL, 1504 MSR_AMD64_TSC_RATIO, 1505 MSR_IA32_POWER_CTL, 1506 MSR_IA32_UCODE_REV, 1507 1508 /* 1509 * The following list leaves out MSRs whose values are determined 1510 * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs. 1511 * We always support the "true" VMX control MSRs, even if the host 1512 * processor does not, so I am putting these registers here rather 1513 * than in msrs_to_save_all. 1514 */ 1515 MSR_IA32_VMX_BASIC, 1516 MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1517 MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1518 MSR_IA32_VMX_TRUE_EXIT_CTLS, 1519 MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1520 MSR_IA32_VMX_MISC, 1521 MSR_IA32_VMX_CR0_FIXED0, 1522 MSR_IA32_VMX_CR4_FIXED0, 1523 MSR_IA32_VMX_VMCS_ENUM, 1524 MSR_IA32_VMX_PROCBASED_CTLS2, 1525 MSR_IA32_VMX_EPT_VPID_CAP, 1526 MSR_IA32_VMX_VMFUNC, 1527 1528 MSR_K7_HWCR, 1529 MSR_KVM_POLL_CONTROL, 1530 }; 1531 1532 static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; 1533 static unsigned num_emulated_msrs; 1534 1535 /* 1536 * List of msr numbers which are used to expose MSR-based features that 1537 * can be used by a hypervisor to validate requested CPU features. 1538 */ 1539 static const u32 msr_based_features_all[] = { 1540 MSR_IA32_VMX_BASIC, 1541 MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1542 MSR_IA32_VMX_PINBASED_CTLS, 1543 MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1544 MSR_IA32_VMX_PROCBASED_CTLS, 1545 MSR_IA32_VMX_TRUE_EXIT_CTLS, 1546 MSR_IA32_VMX_EXIT_CTLS, 1547 MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1548 MSR_IA32_VMX_ENTRY_CTLS, 1549 MSR_IA32_VMX_MISC, 1550 MSR_IA32_VMX_CR0_FIXED0, 1551 MSR_IA32_VMX_CR0_FIXED1, 1552 MSR_IA32_VMX_CR4_FIXED0, 1553 MSR_IA32_VMX_CR4_FIXED1, 1554 MSR_IA32_VMX_VMCS_ENUM, 1555 MSR_IA32_VMX_PROCBASED_CTLS2, 1556 MSR_IA32_VMX_EPT_VPID_CAP, 1557 MSR_IA32_VMX_VMFUNC, 1558 1559 MSR_F10H_DECFG, 1560 MSR_IA32_UCODE_REV, 1561 MSR_IA32_ARCH_CAPABILITIES, 1562 MSR_IA32_PERF_CAPABILITIES, 1563 }; 1564 1565 static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)]; 1566 static unsigned int num_msr_based_features; 1567 1568 /* 1569 * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM 1570 * does not yet virtualize. These include: 1571 * 10 - MISC_PACKAGE_CTRLS 1572 * 11 - ENERGY_FILTERING_CTL 1573 * 12 - DOITM 1574 * 18 - FB_CLEAR_CTRL 1575 * 21 - XAPIC_DISABLE_STATUS 1576 * 23 - OVERCLOCKING_STATUS 1577 */ 1578 1579 #define KVM_SUPPORTED_ARCH_CAP \ 1580 (ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \ 1581 ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \ 1582 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \ 1583 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \ 1584 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO) 1585 1586 static u64 kvm_get_arch_capabilities(void) 1587 { 1588 u64 data = 0; 1589 1590 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { 1591 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data); 1592 data &= KVM_SUPPORTED_ARCH_CAP; 1593 } 1594 1595 /* 1596 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that 1597 * the nested hypervisor runs with NX huge pages. If it is not, 1598 * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other 1599 * L1 guests, so it need not worry about its own (L2) guests. 1600 */ 1601 data |= ARCH_CAP_PSCHANGE_MC_NO; 1602 1603 /* 1604 * If we're doing cache flushes (either "always" or "cond") 1605 * we will do one whenever the guest does a vmlaunch/vmresume. 1606 * If an outer hypervisor is doing the cache flush for us 1607 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that 1608 * capability to the guest too, and if EPT is disabled we're not 1609 * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will 1610 * require a nested hypervisor to do a flush of its own. 1611 */ 1612 if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER) 1613 data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH; 1614 1615 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) 1616 data |= ARCH_CAP_RDCL_NO; 1617 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 1618 data |= ARCH_CAP_SSB_NO; 1619 if (!boot_cpu_has_bug(X86_BUG_MDS)) 1620 data |= ARCH_CAP_MDS_NO; 1621 1622 if (!boot_cpu_has(X86_FEATURE_RTM)) { 1623 /* 1624 * If RTM=0 because the kernel has disabled TSX, the host might 1625 * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0 1626 * and therefore knows that there cannot be TAA) but keep 1627 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts, 1628 * and we want to allow migrating those guests to tsx=off hosts. 1629 */ 1630 data &= ~ARCH_CAP_TAA_NO; 1631 } else if (!boot_cpu_has_bug(X86_BUG_TAA)) { 1632 data |= ARCH_CAP_TAA_NO; 1633 } else { 1634 /* 1635 * Nothing to do here; we emulate TSX_CTRL if present on the 1636 * host so the guest can choose between disabling TSX or 1637 * using VERW to clear CPU buffers. 1638 */ 1639 } 1640 1641 return data; 1642 } 1643 1644 static int kvm_get_msr_feature(struct kvm_msr_entry *msr) 1645 { 1646 switch (msr->index) { 1647 case MSR_IA32_ARCH_CAPABILITIES: 1648 msr->data = kvm_get_arch_capabilities(); 1649 break; 1650 case MSR_IA32_PERF_CAPABILITIES: 1651 msr->data = kvm_caps.supported_perf_cap; 1652 break; 1653 case MSR_IA32_UCODE_REV: 1654 rdmsrl_safe(msr->index, &msr->data); 1655 break; 1656 default: 1657 return static_call(kvm_x86_get_msr_feature)(msr); 1658 } 1659 return 0; 1660 } 1661 1662 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 1663 { 1664 struct kvm_msr_entry msr; 1665 int r; 1666 1667 msr.index = index; 1668 r = kvm_get_msr_feature(&msr); 1669 1670 if (r == KVM_MSR_RET_INVALID) { 1671 /* Unconditionally clear the output for simplicity */ 1672 *data = 0; 1673 if (kvm_msr_ignored_check(index, 0, false)) 1674 r = 0; 1675 } 1676 1677 if (r) 1678 return r; 1679 1680 *data = msr.data; 1681 1682 return 0; 1683 } 1684 1685 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) 1686 { 1687 if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) 1688 return false; 1689 1690 if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) 1691 return false; 1692 1693 if (efer & (EFER_LME | EFER_LMA) && 1694 !guest_cpuid_has(vcpu, X86_FEATURE_LM)) 1695 return false; 1696 1697 if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX)) 1698 return false; 1699 1700 return true; 1701 1702 } 1703 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) 1704 { 1705 if (efer & efer_reserved_bits) 1706 return false; 1707 1708 return __kvm_valid_efer(vcpu, efer); 1709 } 1710 EXPORT_SYMBOL_GPL(kvm_valid_efer); 1711 1712 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1713 { 1714 u64 old_efer = vcpu->arch.efer; 1715 u64 efer = msr_info->data; 1716 int r; 1717 1718 if (efer & efer_reserved_bits) 1719 return 1; 1720 1721 if (!msr_info->host_initiated) { 1722 if (!__kvm_valid_efer(vcpu, efer)) 1723 return 1; 1724 1725 if (is_paging(vcpu) && 1726 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) 1727 return 1; 1728 } 1729 1730 efer &= ~EFER_LMA; 1731 efer |= vcpu->arch.efer & EFER_LMA; 1732 1733 r = static_call(kvm_x86_set_efer)(vcpu, efer); 1734 if (r) { 1735 WARN_ON(r > 0); 1736 return r; 1737 } 1738 1739 if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS) 1740 kvm_mmu_reset_context(vcpu); 1741 1742 return 0; 1743 } 1744 1745 void kvm_enable_efer_bits(u64 mask) 1746 { 1747 efer_reserved_bits &= ~mask; 1748 } 1749 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); 1750 1751 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type) 1752 { 1753 struct kvm_x86_msr_filter *msr_filter; 1754 struct msr_bitmap_range *ranges; 1755 struct kvm *kvm = vcpu->kvm; 1756 bool allowed; 1757 int idx; 1758 u32 i; 1759 1760 /* x2APIC MSRs do not support filtering. */ 1761 if (index >= 0x800 && index <= 0x8ff) 1762 return true; 1763 1764 idx = srcu_read_lock(&kvm->srcu); 1765 1766 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); 1767 if (!msr_filter) { 1768 allowed = true; 1769 goto out; 1770 } 1771 1772 allowed = msr_filter->default_allow; 1773 ranges = msr_filter->ranges; 1774 1775 for (i = 0; i < msr_filter->count; i++) { 1776 u32 start = ranges[i].base; 1777 u32 end = start + ranges[i].nmsrs; 1778 u32 flags = ranges[i].flags; 1779 unsigned long *bitmap = ranges[i].bitmap; 1780 1781 if ((index >= start) && (index < end) && (flags & type)) { 1782 allowed = !!test_bit(index - start, bitmap); 1783 break; 1784 } 1785 } 1786 1787 out: 1788 srcu_read_unlock(&kvm->srcu, idx); 1789 1790 return allowed; 1791 } 1792 EXPORT_SYMBOL_GPL(kvm_msr_allowed); 1793 1794 /* 1795 * Write @data into the MSR specified by @index. Select MSR specific fault 1796 * checks are bypassed if @host_initiated is %true. 1797 * Returns 0 on success, non-0 otherwise. 1798 * Assumes vcpu_load() was already called. 1799 */ 1800 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, 1801 bool host_initiated) 1802 { 1803 struct msr_data msr; 1804 1805 switch (index) { 1806 case MSR_FS_BASE: 1807 case MSR_GS_BASE: 1808 case MSR_KERNEL_GS_BASE: 1809 case MSR_CSTAR: 1810 case MSR_LSTAR: 1811 if (is_noncanonical_address(data, vcpu)) 1812 return 1; 1813 break; 1814 case MSR_IA32_SYSENTER_EIP: 1815 case MSR_IA32_SYSENTER_ESP: 1816 /* 1817 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if 1818 * non-canonical address is written on Intel but not on 1819 * AMD (which ignores the top 32-bits, because it does 1820 * not implement 64-bit SYSENTER). 1821 * 1822 * 64-bit code should hence be able to write a non-canonical 1823 * value on AMD. Making the address canonical ensures that 1824 * vmentry does not fail on Intel after writing a non-canonical 1825 * value, and that something deterministic happens if the guest 1826 * invokes 64-bit SYSENTER. 1827 */ 1828 data = __canonical_address(data, vcpu_virt_addr_bits(vcpu)); 1829 break; 1830 case MSR_TSC_AUX: 1831 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) 1832 return 1; 1833 1834 if (!host_initiated && 1835 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && 1836 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) 1837 return 1; 1838 1839 /* 1840 * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has 1841 * incomplete and conflicting architectural behavior. Current 1842 * AMD CPUs completely ignore bits 63:32, i.e. they aren't 1843 * reserved and always read as zeros. Enforce Intel's reserved 1844 * bits check if and only if the guest CPU is Intel, and clear 1845 * the bits in all other cases. This ensures cross-vendor 1846 * migration will provide consistent behavior for the guest. 1847 */ 1848 if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0) 1849 return 1; 1850 1851 data = (u32)data; 1852 break; 1853 } 1854 1855 msr.data = data; 1856 msr.index = index; 1857 msr.host_initiated = host_initiated; 1858 1859 return static_call(kvm_x86_set_msr)(vcpu, &msr); 1860 } 1861 1862 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, 1863 u32 index, u64 data, bool host_initiated) 1864 { 1865 int ret = __kvm_set_msr(vcpu, index, data, host_initiated); 1866 1867 if (ret == KVM_MSR_RET_INVALID) 1868 if (kvm_msr_ignored_check(index, data, true)) 1869 ret = 0; 1870 1871 return ret; 1872 } 1873 1874 /* 1875 * Read the MSR specified by @index into @data. Select MSR specific fault 1876 * checks are bypassed if @host_initiated is %true. 1877 * Returns 0 on success, non-0 otherwise. 1878 * Assumes vcpu_load() was already called. 1879 */ 1880 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, 1881 bool host_initiated) 1882 { 1883 struct msr_data msr; 1884 int ret; 1885 1886 switch (index) { 1887 case MSR_TSC_AUX: 1888 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) 1889 return 1; 1890 1891 if (!host_initiated && 1892 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && 1893 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) 1894 return 1; 1895 break; 1896 } 1897 1898 msr.index = index; 1899 msr.host_initiated = host_initiated; 1900 1901 ret = static_call(kvm_x86_get_msr)(vcpu, &msr); 1902 if (!ret) 1903 *data = msr.data; 1904 return ret; 1905 } 1906 1907 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, 1908 u32 index, u64 *data, bool host_initiated) 1909 { 1910 int ret = __kvm_get_msr(vcpu, index, data, host_initiated); 1911 1912 if (ret == KVM_MSR_RET_INVALID) { 1913 /* Unconditionally clear *data for simplicity */ 1914 *data = 0; 1915 if (kvm_msr_ignored_check(index, 0, false)) 1916 ret = 0; 1917 } 1918 1919 return ret; 1920 } 1921 1922 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1923 { 1924 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) 1925 return KVM_MSR_RET_FILTERED; 1926 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1927 } 1928 1929 static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data) 1930 { 1931 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) 1932 return KVM_MSR_RET_FILTERED; 1933 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1934 } 1935 1936 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1937 { 1938 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1939 } 1940 EXPORT_SYMBOL_GPL(kvm_get_msr); 1941 1942 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) 1943 { 1944 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1945 } 1946 EXPORT_SYMBOL_GPL(kvm_set_msr); 1947 1948 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu) 1949 { 1950 if (!vcpu->run->msr.error) { 1951 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); 1952 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); 1953 } 1954 } 1955 1956 static int complete_emulated_msr_access(struct kvm_vcpu *vcpu) 1957 { 1958 return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error); 1959 } 1960 1961 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) 1962 { 1963 complete_userspace_rdmsr(vcpu); 1964 return complete_emulated_msr_access(vcpu); 1965 } 1966 1967 static int complete_fast_msr_access(struct kvm_vcpu *vcpu) 1968 { 1969 return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); 1970 } 1971 1972 static int complete_fast_rdmsr(struct kvm_vcpu *vcpu) 1973 { 1974 complete_userspace_rdmsr(vcpu); 1975 return complete_fast_msr_access(vcpu); 1976 } 1977 1978 static u64 kvm_msr_reason(int r) 1979 { 1980 switch (r) { 1981 case KVM_MSR_RET_INVALID: 1982 return KVM_MSR_EXIT_REASON_UNKNOWN; 1983 case KVM_MSR_RET_FILTERED: 1984 return KVM_MSR_EXIT_REASON_FILTER; 1985 default: 1986 return KVM_MSR_EXIT_REASON_INVAL; 1987 } 1988 } 1989 1990 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, 1991 u32 exit_reason, u64 data, 1992 int (*completion)(struct kvm_vcpu *vcpu), 1993 int r) 1994 { 1995 u64 msr_reason = kvm_msr_reason(r); 1996 1997 /* Check if the user wanted to know about this MSR fault */ 1998 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) 1999 return 0; 2000 2001 vcpu->run->exit_reason = exit_reason; 2002 vcpu->run->msr.error = 0; 2003 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); 2004 vcpu->run->msr.reason = msr_reason; 2005 vcpu->run->msr.index = index; 2006 vcpu->run->msr.data = data; 2007 vcpu->arch.complete_userspace_io = completion; 2008 2009 return 1; 2010 } 2011 2012 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) 2013 { 2014 u32 ecx = kvm_rcx_read(vcpu); 2015 u64 data; 2016 int r; 2017 2018 r = kvm_get_msr_with_filter(vcpu, ecx, &data); 2019 2020 if (!r) { 2021 trace_kvm_msr_read(ecx, data); 2022 2023 kvm_rax_write(vcpu, data & -1u); 2024 kvm_rdx_write(vcpu, (data >> 32) & -1u); 2025 } else { 2026 /* MSR read failed? See if we should ask user space */ 2027 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0, 2028 complete_fast_rdmsr, r)) 2029 return 0; 2030 trace_kvm_msr_read_ex(ecx); 2031 } 2032 2033 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 2034 } 2035 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); 2036 2037 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) 2038 { 2039 u32 ecx = kvm_rcx_read(vcpu); 2040 u64 data = kvm_read_edx_eax(vcpu); 2041 int r; 2042 2043 r = kvm_set_msr_with_filter(vcpu, ecx, data); 2044 2045 if (!r) { 2046 trace_kvm_msr_write(ecx, data); 2047 } else { 2048 /* MSR write failed? See if we should ask user space */ 2049 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data, 2050 complete_fast_msr_access, r)) 2051 return 0; 2052 /* Signal all other negative errors to userspace */ 2053 if (r < 0) 2054 return r; 2055 trace_kvm_msr_write_ex(ecx, data); 2056 } 2057 2058 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 2059 } 2060 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); 2061 2062 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) 2063 { 2064 return kvm_skip_emulated_instruction(vcpu); 2065 } 2066 EXPORT_SYMBOL_GPL(kvm_emulate_as_nop); 2067 2068 int kvm_emulate_invd(struct kvm_vcpu *vcpu) 2069 { 2070 /* Treat an INVD instruction as a NOP and just skip it. */ 2071 return kvm_emulate_as_nop(vcpu); 2072 } 2073 EXPORT_SYMBOL_GPL(kvm_emulate_invd); 2074 2075 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) 2076 { 2077 kvm_queue_exception(vcpu, UD_VECTOR); 2078 return 1; 2079 } 2080 EXPORT_SYMBOL_GPL(kvm_handle_invalid_op); 2081 2082 2083 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn) 2084 { 2085 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) && 2086 !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT)) 2087 return kvm_handle_invalid_op(vcpu); 2088 2089 pr_warn_once("kvm: %s instruction emulated as NOP!\n", insn); 2090 return kvm_emulate_as_nop(vcpu); 2091 } 2092 int kvm_emulate_mwait(struct kvm_vcpu *vcpu) 2093 { 2094 return kvm_emulate_monitor_mwait(vcpu, "MWAIT"); 2095 } 2096 EXPORT_SYMBOL_GPL(kvm_emulate_mwait); 2097 2098 int kvm_emulate_monitor(struct kvm_vcpu *vcpu) 2099 { 2100 return kvm_emulate_monitor_mwait(vcpu, "MONITOR"); 2101 } 2102 EXPORT_SYMBOL_GPL(kvm_emulate_monitor); 2103 2104 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) 2105 { 2106 xfer_to_guest_mode_prepare(); 2107 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || 2108 xfer_to_guest_mode_work_pending(); 2109 } 2110 2111 /* 2112 * The fast path for frequent and performance sensitive wrmsr emulation, 2113 * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces 2114 * the latency of virtual IPI by avoiding the expensive bits of transitioning 2115 * from guest to host, e.g. reacquiring KVM's SRCU lock. In contrast to the 2116 * other cases which must be called after interrupts are enabled on the host. 2117 */ 2118 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data) 2119 { 2120 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) 2121 return 1; 2122 2123 if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) && 2124 ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) && 2125 ((data & APIC_MODE_MASK) == APIC_DM_FIXED) && 2126 ((u32)(data >> 32) != X2APIC_BROADCAST)) 2127 return kvm_x2apic_icr_write(vcpu->arch.apic, data); 2128 2129 return 1; 2130 } 2131 2132 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data) 2133 { 2134 if (!kvm_can_use_hv_timer(vcpu)) 2135 return 1; 2136 2137 kvm_set_lapic_tscdeadline_msr(vcpu, data); 2138 return 0; 2139 } 2140 2141 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) 2142 { 2143 u32 msr = kvm_rcx_read(vcpu); 2144 u64 data; 2145 fastpath_t ret = EXIT_FASTPATH_NONE; 2146 2147 switch (msr) { 2148 case APIC_BASE_MSR + (APIC_ICR >> 4): 2149 data = kvm_read_edx_eax(vcpu); 2150 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) { 2151 kvm_skip_emulated_instruction(vcpu); 2152 ret = EXIT_FASTPATH_EXIT_HANDLED; 2153 } 2154 break; 2155 case MSR_IA32_TSC_DEADLINE: 2156 data = kvm_read_edx_eax(vcpu); 2157 if (!handle_fastpath_set_tscdeadline(vcpu, data)) { 2158 kvm_skip_emulated_instruction(vcpu); 2159 ret = EXIT_FASTPATH_REENTER_GUEST; 2160 } 2161 break; 2162 default: 2163 break; 2164 } 2165 2166 if (ret != EXIT_FASTPATH_NONE) 2167 trace_kvm_msr_write(msr, data); 2168 2169 return ret; 2170 } 2171 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff); 2172 2173 /* 2174 * Adapt set_msr() to msr_io()'s calling convention 2175 */ 2176 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 2177 { 2178 return kvm_get_msr_ignored_check(vcpu, index, data, true); 2179 } 2180 2181 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 2182 { 2183 return kvm_set_msr_ignored_check(vcpu, index, *data, true); 2184 } 2185 2186 #ifdef CONFIG_X86_64 2187 struct pvclock_clock { 2188 int vclock_mode; 2189 u64 cycle_last; 2190 u64 mask; 2191 u32 mult; 2192 u32 shift; 2193 u64 base_cycles; 2194 u64 offset; 2195 }; 2196 2197 struct pvclock_gtod_data { 2198 seqcount_t seq; 2199 2200 struct pvclock_clock clock; /* extract of a clocksource struct */ 2201 struct pvclock_clock raw_clock; /* extract of a clocksource struct */ 2202 2203 ktime_t offs_boot; 2204 u64 wall_time_sec; 2205 }; 2206 2207 static struct pvclock_gtod_data pvclock_gtod_data; 2208 2209 static void update_pvclock_gtod(struct timekeeper *tk) 2210 { 2211 struct pvclock_gtod_data *vdata = &pvclock_gtod_data; 2212 2213 write_seqcount_begin(&vdata->seq); 2214 2215 /* copy pvclock gtod data */ 2216 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; 2217 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; 2218 vdata->clock.mask = tk->tkr_mono.mask; 2219 vdata->clock.mult = tk->tkr_mono.mult; 2220 vdata->clock.shift = tk->tkr_mono.shift; 2221 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; 2222 vdata->clock.offset = tk->tkr_mono.base; 2223 2224 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; 2225 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; 2226 vdata->raw_clock.mask = tk->tkr_raw.mask; 2227 vdata->raw_clock.mult = tk->tkr_raw.mult; 2228 vdata->raw_clock.shift = tk->tkr_raw.shift; 2229 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; 2230 vdata->raw_clock.offset = tk->tkr_raw.base; 2231 2232 vdata->wall_time_sec = tk->xtime_sec; 2233 2234 vdata->offs_boot = tk->offs_boot; 2235 2236 write_seqcount_end(&vdata->seq); 2237 } 2238 2239 static s64 get_kvmclock_base_ns(void) 2240 { 2241 /* Count up from boot time, but with the frequency of the raw clock. */ 2242 return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot)); 2243 } 2244 #else 2245 static s64 get_kvmclock_base_ns(void) 2246 { 2247 /* Master clock not used, so we can just use CLOCK_BOOTTIME. */ 2248 return ktime_get_boottime_ns(); 2249 } 2250 #endif 2251 2252 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs) 2253 { 2254 int version; 2255 int r; 2256 struct pvclock_wall_clock wc; 2257 u32 wc_sec_hi; 2258 u64 wall_nsec; 2259 2260 if (!wall_clock) 2261 return; 2262 2263 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); 2264 if (r) 2265 return; 2266 2267 if (version & 1) 2268 ++version; /* first time write, random junk */ 2269 2270 ++version; 2271 2272 if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version))) 2273 return; 2274 2275 /* 2276 * The guest calculates current wall clock time by adding 2277 * system time (updated by kvm_guest_time_update below) to the 2278 * wall clock specified here. We do the reverse here. 2279 */ 2280 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); 2281 2282 wc.nsec = do_div(wall_nsec, 1000000000); 2283 wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */ 2284 wc.version = version; 2285 2286 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); 2287 2288 if (sec_hi_ofs) { 2289 wc_sec_hi = wall_nsec >> 32; 2290 kvm_write_guest(kvm, wall_clock + sec_hi_ofs, 2291 &wc_sec_hi, sizeof(wc_sec_hi)); 2292 } 2293 2294 version++; 2295 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); 2296 } 2297 2298 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, 2299 bool old_msr, bool host_initiated) 2300 { 2301 struct kvm_arch *ka = &vcpu->kvm->arch; 2302 2303 if (vcpu->vcpu_id == 0 && !host_initiated) { 2304 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) 2305 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2306 2307 ka->boot_vcpu_runs_old_kvmclock = old_msr; 2308 } 2309 2310 vcpu->arch.time = system_time; 2311 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 2312 2313 /* we verify if the enable bit is set... */ 2314 if (system_time & 1) { 2315 kvm_gpc_activate(vcpu->kvm, &vcpu->arch.pv_time, vcpu, 2316 KVM_HOST_USES_PFN, system_time & ~1ULL, 2317 sizeof(struct pvclock_vcpu_time_info)); 2318 } else { 2319 kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time); 2320 } 2321 2322 return; 2323 } 2324 2325 static uint32_t div_frac(uint32_t dividend, uint32_t divisor) 2326 { 2327 do_shl32_div32(dividend, divisor); 2328 return dividend; 2329 } 2330 2331 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz, 2332 s8 *pshift, u32 *pmultiplier) 2333 { 2334 uint64_t scaled64; 2335 int32_t shift = 0; 2336 uint64_t tps64; 2337 uint32_t tps32; 2338 2339 tps64 = base_hz; 2340 scaled64 = scaled_hz; 2341 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { 2342 tps64 >>= 1; 2343 shift--; 2344 } 2345 2346 tps32 = (uint32_t)tps64; 2347 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { 2348 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) 2349 scaled64 >>= 1; 2350 else 2351 tps32 <<= 1; 2352 shift++; 2353 } 2354 2355 *pshift = shift; 2356 *pmultiplier = div_frac(scaled64, tps32); 2357 } 2358 2359 #ifdef CONFIG_X86_64 2360 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); 2361 #endif 2362 2363 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); 2364 static unsigned long max_tsc_khz; 2365 2366 static u32 adjust_tsc_khz(u32 khz, s32 ppm) 2367 { 2368 u64 v = (u64)khz * (1000000 + ppm); 2369 do_div(v, 1000000); 2370 return v; 2371 } 2372 2373 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier); 2374 2375 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) 2376 { 2377 u64 ratio; 2378 2379 /* Guest TSC same frequency as host TSC? */ 2380 if (!scale) { 2381 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); 2382 return 0; 2383 } 2384 2385 /* TSC scaling supported? */ 2386 if (!kvm_caps.has_tsc_control) { 2387 if (user_tsc_khz > tsc_khz) { 2388 vcpu->arch.tsc_catchup = 1; 2389 vcpu->arch.tsc_always_catchup = 1; 2390 return 0; 2391 } else { 2392 pr_warn_ratelimited("user requested TSC rate below hardware speed\n"); 2393 return -1; 2394 } 2395 } 2396 2397 /* TSC scaling required - calculate ratio */ 2398 ratio = mul_u64_u32_div(1ULL << kvm_caps.tsc_scaling_ratio_frac_bits, 2399 user_tsc_khz, tsc_khz); 2400 2401 if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) { 2402 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", 2403 user_tsc_khz); 2404 return -1; 2405 } 2406 2407 kvm_vcpu_write_tsc_multiplier(vcpu, ratio); 2408 return 0; 2409 } 2410 2411 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) 2412 { 2413 u32 thresh_lo, thresh_hi; 2414 int use_scaling = 0; 2415 2416 /* tsc_khz can be zero if TSC calibration fails */ 2417 if (user_tsc_khz == 0) { 2418 /* set tsc_scaling_ratio to a safe value */ 2419 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); 2420 return -1; 2421 } 2422 2423 /* Compute a scale to convert nanoseconds in TSC cycles */ 2424 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC, 2425 &vcpu->arch.virtual_tsc_shift, 2426 &vcpu->arch.virtual_tsc_mult); 2427 vcpu->arch.virtual_tsc_khz = user_tsc_khz; 2428 2429 /* 2430 * Compute the variation in TSC rate which is acceptable 2431 * within the range of tolerance and decide if the 2432 * rate being applied is within that bounds of the hardware 2433 * rate. If so, no scaling or compensation need be done. 2434 */ 2435 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); 2436 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); 2437 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) { 2438 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi); 2439 use_scaling = 1; 2440 } 2441 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); 2442 } 2443 2444 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) 2445 { 2446 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, 2447 vcpu->arch.virtual_tsc_mult, 2448 vcpu->arch.virtual_tsc_shift); 2449 tsc += vcpu->arch.this_tsc_write; 2450 return tsc; 2451 } 2452 2453 #ifdef CONFIG_X86_64 2454 static inline int gtod_is_based_on_tsc(int mode) 2455 { 2456 return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK; 2457 } 2458 #endif 2459 2460 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) 2461 { 2462 #ifdef CONFIG_X86_64 2463 bool vcpus_matched; 2464 struct kvm_arch *ka = &vcpu->kvm->arch; 2465 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2466 2467 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 2468 atomic_read(&vcpu->kvm->online_vcpus)); 2469 2470 /* 2471 * Once the masterclock is enabled, always perform request in 2472 * order to update it. 2473 * 2474 * In order to enable masterclock, the host clocksource must be TSC 2475 * and the vcpus need to have matched TSCs. When that happens, 2476 * perform request to enable masterclock. 2477 */ 2478 if (ka->use_master_clock || 2479 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) 2480 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2481 2482 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, 2483 atomic_read(&vcpu->kvm->online_vcpus), 2484 ka->use_master_clock, gtod->clock.vclock_mode); 2485 #endif 2486 } 2487 2488 /* 2489 * Multiply tsc by a fixed point number represented by ratio. 2490 * 2491 * The most significant 64-N bits (mult) of ratio represent the 2492 * integral part of the fixed point number; the remaining N bits 2493 * (frac) represent the fractional part, ie. ratio represents a fixed 2494 * point number (mult + frac * 2^(-N)). 2495 * 2496 * N equals to kvm_caps.tsc_scaling_ratio_frac_bits. 2497 */ 2498 static inline u64 __scale_tsc(u64 ratio, u64 tsc) 2499 { 2500 return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits); 2501 } 2502 2503 u64 kvm_scale_tsc(u64 tsc, u64 ratio) 2504 { 2505 u64 _tsc = tsc; 2506 2507 if (ratio != kvm_caps.default_tsc_scaling_ratio) 2508 _tsc = __scale_tsc(ratio, tsc); 2509 2510 return _tsc; 2511 } 2512 EXPORT_SYMBOL_GPL(kvm_scale_tsc); 2513 2514 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) 2515 { 2516 u64 tsc; 2517 2518 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); 2519 2520 return target_tsc - tsc; 2521 } 2522 2523 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) 2524 { 2525 return vcpu->arch.l1_tsc_offset + 2526 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio); 2527 } 2528 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 2529 2530 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier) 2531 { 2532 u64 nested_offset; 2533 2534 if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio) 2535 nested_offset = l1_offset; 2536 else 2537 nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier, 2538 kvm_caps.tsc_scaling_ratio_frac_bits); 2539 2540 nested_offset += l2_offset; 2541 return nested_offset; 2542 } 2543 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset); 2544 2545 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier) 2546 { 2547 if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio) 2548 return mul_u64_u64_shr(l1_multiplier, l2_multiplier, 2549 kvm_caps.tsc_scaling_ratio_frac_bits); 2550 2551 return l1_multiplier; 2552 } 2553 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier); 2554 2555 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset) 2556 { 2557 trace_kvm_write_tsc_offset(vcpu->vcpu_id, 2558 vcpu->arch.l1_tsc_offset, 2559 l1_offset); 2560 2561 vcpu->arch.l1_tsc_offset = l1_offset; 2562 2563 /* 2564 * If we are here because L1 chose not to trap WRMSR to TSC then 2565 * according to the spec this should set L1's TSC (as opposed to 2566 * setting L1's offset for L2). 2567 */ 2568 if (is_guest_mode(vcpu)) 2569 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( 2570 l1_offset, 2571 static_call(kvm_x86_get_l2_tsc_offset)(vcpu), 2572 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2573 else 2574 vcpu->arch.tsc_offset = l1_offset; 2575 2576 static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset); 2577 } 2578 2579 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier) 2580 { 2581 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; 2582 2583 /* Userspace is changing the multiplier while L2 is active */ 2584 if (is_guest_mode(vcpu)) 2585 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( 2586 l1_multiplier, 2587 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2588 else 2589 vcpu->arch.tsc_scaling_ratio = l1_multiplier; 2590 2591 if (kvm_caps.has_tsc_control) 2592 static_call(kvm_x86_write_tsc_multiplier)( 2593 vcpu, vcpu->arch.tsc_scaling_ratio); 2594 } 2595 2596 static inline bool kvm_check_tsc_unstable(void) 2597 { 2598 #ifdef CONFIG_X86_64 2599 /* 2600 * TSC is marked unstable when we're running on Hyper-V, 2601 * 'TSC page' clocksource is good. 2602 */ 2603 if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK) 2604 return false; 2605 #endif 2606 return check_tsc_unstable(); 2607 } 2608 2609 /* 2610 * Infers attempts to synchronize the guest's tsc from host writes. Sets the 2611 * offset for the vcpu and tracks the TSC matching generation that the vcpu 2612 * participates in. 2613 */ 2614 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc, 2615 u64 ns, bool matched) 2616 { 2617 struct kvm *kvm = vcpu->kvm; 2618 2619 lockdep_assert_held(&kvm->arch.tsc_write_lock); 2620 2621 /* 2622 * We also track th most recent recorded KHZ, write and time to 2623 * allow the matching interval to be extended at each write. 2624 */ 2625 kvm->arch.last_tsc_nsec = ns; 2626 kvm->arch.last_tsc_write = tsc; 2627 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; 2628 kvm->arch.last_tsc_offset = offset; 2629 2630 vcpu->arch.last_guest_tsc = tsc; 2631 2632 kvm_vcpu_write_tsc_offset(vcpu, offset); 2633 2634 if (!matched) { 2635 /* 2636 * We split periods of matched TSC writes into generations. 2637 * For each generation, we track the original measured 2638 * nanosecond time, offset, and write, so if TSCs are in 2639 * sync, we can match exact offset, and if not, we can match 2640 * exact software computation in compute_guest_tsc() 2641 * 2642 * These values are tracked in kvm->arch.cur_xxx variables. 2643 */ 2644 kvm->arch.cur_tsc_generation++; 2645 kvm->arch.cur_tsc_nsec = ns; 2646 kvm->arch.cur_tsc_write = tsc; 2647 kvm->arch.cur_tsc_offset = offset; 2648 kvm->arch.nr_vcpus_matched_tsc = 0; 2649 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { 2650 kvm->arch.nr_vcpus_matched_tsc++; 2651 } 2652 2653 /* Keep track of which generation this VCPU has synchronized to */ 2654 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; 2655 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; 2656 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; 2657 2658 kvm_track_tsc_matching(vcpu); 2659 } 2660 2661 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) 2662 { 2663 struct kvm *kvm = vcpu->kvm; 2664 u64 offset, ns, elapsed; 2665 unsigned long flags; 2666 bool matched = false; 2667 bool synchronizing = false; 2668 2669 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 2670 offset = kvm_compute_l1_tsc_offset(vcpu, data); 2671 ns = get_kvmclock_base_ns(); 2672 elapsed = ns - kvm->arch.last_tsc_nsec; 2673 2674 if (vcpu->arch.virtual_tsc_khz) { 2675 if (data == 0) { 2676 /* 2677 * detection of vcpu initialization -- need to sync 2678 * with other vCPUs. This particularly helps to keep 2679 * kvm_clock stable after CPU hotplug 2680 */ 2681 synchronizing = true; 2682 } else { 2683 u64 tsc_exp = kvm->arch.last_tsc_write + 2684 nsec_to_cycles(vcpu, elapsed); 2685 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; 2686 /* 2687 * Special case: TSC write with a small delta (1 second) 2688 * of virtual cycle time against real time is 2689 * interpreted as an attempt to synchronize the CPU. 2690 */ 2691 synchronizing = data < tsc_exp + tsc_hz && 2692 data + tsc_hz > tsc_exp; 2693 } 2694 } 2695 2696 /* 2697 * For a reliable TSC, we can match TSC offsets, and for an unstable 2698 * TSC, we add elapsed time in this computation. We could let the 2699 * compensation code attempt to catch up if we fall behind, but 2700 * it's better to try to match offsets from the beginning. 2701 */ 2702 if (synchronizing && 2703 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { 2704 if (!kvm_check_tsc_unstable()) { 2705 offset = kvm->arch.cur_tsc_offset; 2706 } else { 2707 u64 delta = nsec_to_cycles(vcpu, elapsed); 2708 data += delta; 2709 offset = kvm_compute_l1_tsc_offset(vcpu, data); 2710 } 2711 matched = true; 2712 } 2713 2714 __kvm_synchronize_tsc(vcpu, offset, data, ns, matched); 2715 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 2716 } 2717 2718 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, 2719 s64 adjustment) 2720 { 2721 u64 tsc_offset = vcpu->arch.l1_tsc_offset; 2722 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); 2723 } 2724 2725 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) 2726 { 2727 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio) 2728 WARN_ON(adjustment < 0); 2729 adjustment = kvm_scale_tsc((u64) adjustment, 2730 vcpu->arch.l1_tsc_scaling_ratio); 2731 adjust_tsc_offset_guest(vcpu, adjustment); 2732 } 2733 2734 #ifdef CONFIG_X86_64 2735 2736 static u64 read_tsc(void) 2737 { 2738 u64 ret = (u64)rdtsc_ordered(); 2739 u64 last = pvclock_gtod_data.clock.cycle_last; 2740 2741 if (likely(ret >= last)) 2742 return ret; 2743 2744 /* 2745 * GCC likes to generate cmov here, but this branch is extremely 2746 * predictable (it's just a function of time and the likely is 2747 * very likely) and there's a data dependence, so force GCC 2748 * to generate a branch instead. I don't barrier() because 2749 * we don't actually need a barrier, and if this function 2750 * ever gets inlined it will generate worse code. 2751 */ 2752 asm volatile (""); 2753 return last; 2754 } 2755 2756 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp, 2757 int *mode) 2758 { 2759 long v; 2760 u64 tsc_pg_val; 2761 2762 switch (clock->vclock_mode) { 2763 case VDSO_CLOCKMODE_HVCLOCK: 2764 tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(), 2765 tsc_timestamp); 2766 if (tsc_pg_val != U64_MAX) { 2767 /* TSC page valid */ 2768 *mode = VDSO_CLOCKMODE_HVCLOCK; 2769 v = (tsc_pg_val - clock->cycle_last) & 2770 clock->mask; 2771 } else { 2772 /* TSC page invalid */ 2773 *mode = VDSO_CLOCKMODE_NONE; 2774 } 2775 break; 2776 case VDSO_CLOCKMODE_TSC: 2777 *mode = VDSO_CLOCKMODE_TSC; 2778 *tsc_timestamp = read_tsc(); 2779 v = (*tsc_timestamp - clock->cycle_last) & 2780 clock->mask; 2781 break; 2782 default: 2783 *mode = VDSO_CLOCKMODE_NONE; 2784 } 2785 2786 if (*mode == VDSO_CLOCKMODE_NONE) 2787 *tsc_timestamp = v = 0; 2788 2789 return v * clock->mult; 2790 } 2791 2792 static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp) 2793 { 2794 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2795 unsigned long seq; 2796 int mode; 2797 u64 ns; 2798 2799 do { 2800 seq = read_seqcount_begin(>od->seq); 2801 ns = gtod->raw_clock.base_cycles; 2802 ns += vgettsc(>od->raw_clock, tsc_timestamp, &mode); 2803 ns >>= gtod->raw_clock.shift; 2804 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot)); 2805 } while (unlikely(read_seqcount_retry(>od->seq, seq))); 2806 *t = ns; 2807 2808 return mode; 2809 } 2810 2811 static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp) 2812 { 2813 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2814 unsigned long seq; 2815 int mode; 2816 u64 ns; 2817 2818 do { 2819 seq = read_seqcount_begin(>od->seq); 2820 ts->tv_sec = gtod->wall_time_sec; 2821 ns = gtod->clock.base_cycles; 2822 ns += vgettsc(>od->clock, tsc_timestamp, &mode); 2823 ns >>= gtod->clock.shift; 2824 } while (unlikely(read_seqcount_retry(>od->seq, seq))); 2825 2826 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); 2827 ts->tv_nsec = ns; 2828 2829 return mode; 2830 } 2831 2832 /* returns true if host is using TSC based clocksource */ 2833 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp) 2834 { 2835 /* checked again under seqlock below */ 2836 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) 2837 return false; 2838 2839 return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns, 2840 tsc_timestamp)); 2841 } 2842 2843 /* returns true if host is using TSC based clocksource */ 2844 static bool kvm_get_walltime_and_clockread(struct timespec64 *ts, 2845 u64 *tsc_timestamp) 2846 { 2847 /* checked again under seqlock below */ 2848 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) 2849 return false; 2850 2851 return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp)); 2852 } 2853 #endif 2854 2855 /* 2856 * 2857 * Assuming a stable TSC across physical CPUS, and a stable TSC 2858 * across virtual CPUs, the following condition is possible. 2859 * Each numbered line represents an event visible to both 2860 * CPUs at the next numbered event. 2861 * 2862 * "timespecX" represents host monotonic time. "tscX" represents 2863 * RDTSC value. 2864 * 2865 * VCPU0 on CPU0 | VCPU1 on CPU1 2866 * 2867 * 1. read timespec0,tsc0 2868 * 2. | timespec1 = timespec0 + N 2869 * | tsc1 = tsc0 + M 2870 * 3. transition to guest | transition to guest 2871 * 4. ret0 = timespec0 + (rdtsc - tsc0) | 2872 * 5. | ret1 = timespec1 + (rdtsc - tsc1) 2873 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M)) 2874 * 2875 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity: 2876 * 2877 * - ret0 < ret1 2878 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M)) 2879 * ... 2880 * - 0 < N - M => M < N 2881 * 2882 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not 2883 * always the case (the difference between two distinct xtime instances 2884 * might be smaller then the difference between corresponding TSC reads, 2885 * when updating guest vcpus pvclock areas). 2886 * 2887 * To avoid that problem, do not allow visibility of distinct 2888 * system_timestamp/tsc_timestamp values simultaneously: use a master 2889 * copy of host monotonic time values. Update that master copy 2890 * in lockstep. 2891 * 2892 * Rely on synchronization of host TSCs and guest TSCs for monotonicity. 2893 * 2894 */ 2895 2896 static void pvclock_update_vm_gtod_copy(struct kvm *kvm) 2897 { 2898 #ifdef CONFIG_X86_64 2899 struct kvm_arch *ka = &kvm->arch; 2900 int vclock_mode; 2901 bool host_tsc_clocksource, vcpus_matched; 2902 2903 lockdep_assert_held(&kvm->arch.tsc_write_lock); 2904 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 2905 atomic_read(&kvm->online_vcpus)); 2906 2907 /* 2908 * If the host uses TSC clock, then passthrough TSC as stable 2909 * to the guest. 2910 */ 2911 host_tsc_clocksource = kvm_get_time_and_clockread( 2912 &ka->master_kernel_ns, 2913 &ka->master_cycle_now); 2914 2915 ka->use_master_clock = host_tsc_clocksource && vcpus_matched 2916 && !ka->backwards_tsc_observed 2917 && !ka->boot_vcpu_runs_old_kvmclock; 2918 2919 if (ka->use_master_clock) 2920 atomic_set(&kvm_guest_has_master_clock, 1); 2921 2922 vclock_mode = pvclock_gtod_data.clock.vclock_mode; 2923 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, 2924 vcpus_matched); 2925 #endif 2926 } 2927 2928 static void kvm_make_mclock_inprogress_request(struct kvm *kvm) 2929 { 2930 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); 2931 } 2932 2933 static void __kvm_start_pvclock_update(struct kvm *kvm) 2934 { 2935 raw_spin_lock_irq(&kvm->arch.tsc_write_lock); 2936 write_seqcount_begin(&kvm->arch.pvclock_sc); 2937 } 2938 2939 static void kvm_start_pvclock_update(struct kvm *kvm) 2940 { 2941 kvm_make_mclock_inprogress_request(kvm); 2942 2943 /* no guest entries from this point */ 2944 __kvm_start_pvclock_update(kvm); 2945 } 2946 2947 static void kvm_end_pvclock_update(struct kvm *kvm) 2948 { 2949 struct kvm_arch *ka = &kvm->arch; 2950 struct kvm_vcpu *vcpu; 2951 unsigned long i; 2952 2953 write_seqcount_end(&ka->pvclock_sc); 2954 raw_spin_unlock_irq(&ka->tsc_write_lock); 2955 kvm_for_each_vcpu(i, vcpu, kvm) 2956 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2957 2958 /* guest entries allowed */ 2959 kvm_for_each_vcpu(i, vcpu, kvm) 2960 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); 2961 } 2962 2963 static void kvm_update_masterclock(struct kvm *kvm) 2964 { 2965 kvm_hv_request_tsc_page_update(kvm); 2966 kvm_start_pvclock_update(kvm); 2967 pvclock_update_vm_gtod_copy(kvm); 2968 kvm_end_pvclock_update(kvm); 2969 } 2970 2971 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. */ 2972 static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 2973 { 2974 struct kvm_arch *ka = &kvm->arch; 2975 struct pvclock_vcpu_time_info hv_clock; 2976 2977 /* both __this_cpu_read() and rdtsc() should be on the same cpu */ 2978 get_cpu(); 2979 2980 data->flags = 0; 2981 if (ka->use_master_clock && __this_cpu_read(cpu_tsc_khz)) { 2982 #ifdef CONFIG_X86_64 2983 struct timespec64 ts; 2984 2985 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) { 2986 data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec; 2987 data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC; 2988 } else 2989 #endif 2990 data->host_tsc = rdtsc(); 2991 2992 data->flags |= KVM_CLOCK_TSC_STABLE; 2993 hv_clock.tsc_timestamp = ka->master_cycle_now; 2994 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 2995 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, 2996 &hv_clock.tsc_shift, 2997 &hv_clock.tsc_to_system_mul); 2998 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc); 2999 } else { 3000 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; 3001 } 3002 3003 put_cpu(); 3004 } 3005 3006 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 3007 { 3008 struct kvm_arch *ka = &kvm->arch; 3009 unsigned seq; 3010 3011 do { 3012 seq = read_seqcount_begin(&ka->pvclock_sc); 3013 __get_kvmclock(kvm, data); 3014 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 3015 } 3016 3017 u64 get_kvmclock_ns(struct kvm *kvm) 3018 { 3019 struct kvm_clock_data data; 3020 3021 get_kvmclock(kvm, &data); 3022 return data.clock; 3023 } 3024 3025 static void kvm_setup_guest_pvclock(struct kvm_vcpu *v, 3026 struct gfn_to_pfn_cache *gpc, 3027 unsigned int offset) 3028 { 3029 struct kvm_vcpu_arch *vcpu = &v->arch; 3030 struct pvclock_vcpu_time_info *guest_hv_clock; 3031 unsigned long flags; 3032 3033 read_lock_irqsave(&gpc->lock, flags); 3034 while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, 3035 offset + sizeof(*guest_hv_clock))) { 3036 read_unlock_irqrestore(&gpc->lock, flags); 3037 3038 if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, 3039 offset + sizeof(*guest_hv_clock))) 3040 return; 3041 3042 read_lock_irqsave(&gpc->lock, flags); 3043 } 3044 3045 guest_hv_clock = (void *)(gpc->khva + offset); 3046 3047 /* 3048 * This VCPU is paused, but it's legal for a guest to read another 3049 * VCPU's kvmclock, so we really have to follow the specification where 3050 * it says that version is odd if data is being modified, and even after 3051 * it is consistent. 3052 */ 3053 3054 guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1; 3055 smp_wmb(); 3056 3057 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 3058 vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); 3059 3060 if (vcpu->pvclock_set_guest_stopped_request) { 3061 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; 3062 vcpu->pvclock_set_guest_stopped_request = false; 3063 } 3064 3065 memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock)); 3066 smp_wmb(); 3067 3068 guest_hv_clock->version = ++vcpu->hv_clock.version; 3069 3070 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); 3071 read_unlock_irqrestore(&gpc->lock, flags); 3072 3073 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); 3074 } 3075 3076 static int kvm_guest_time_update(struct kvm_vcpu *v) 3077 { 3078 unsigned long flags, tgt_tsc_khz; 3079 unsigned seq; 3080 struct kvm_vcpu_arch *vcpu = &v->arch; 3081 struct kvm_arch *ka = &v->kvm->arch; 3082 s64 kernel_ns; 3083 u64 tsc_timestamp, host_tsc; 3084 u8 pvclock_flags; 3085 bool use_master_clock; 3086 3087 kernel_ns = 0; 3088 host_tsc = 0; 3089 3090 /* 3091 * If the host uses TSC clock, then passthrough TSC as stable 3092 * to the guest. 3093 */ 3094 do { 3095 seq = read_seqcount_begin(&ka->pvclock_sc); 3096 use_master_clock = ka->use_master_clock; 3097 if (use_master_clock) { 3098 host_tsc = ka->master_cycle_now; 3099 kernel_ns = ka->master_kernel_ns; 3100 } 3101 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 3102 3103 /* Keep irq disabled to prevent changes to the clock */ 3104 local_irq_save(flags); 3105 tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz); 3106 if (unlikely(tgt_tsc_khz == 0)) { 3107 local_irq_restore(flags); 3108 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3109 return 1; 3110 } 3111 if (!use_master_clock) { 3112 host_tsc = rdtsc(); 3113 kernel_ns = get_kvmclock_base_ns(); 3114 } 3115 3116 tsc_timestamp = kvm_read_l1_tsc(v, host_tsc); 3117 3118 /* 3119 * We may have to catch up the TSC to match elapsed wall clock 3120 * time for two reasons, even if kvmclock is used. 3121 * 1) CPU could have been running below the maximum TSC rate 3122 * 2) Broken TSC compensation resets the base at each VCPU 3123 * entry to avoid unknown leaps of TSC even when running 3124 * again on the same CPU. This may cause apparent elapsed 3125 * time to disappear, and the guest to stand still or run 3126 * very slowly. 3127 */ 3128 if (vcpu->tsc_catchup) { 3129 u64 tsc = compute_guest_tsc(v, kernel_ns); 3130 if (tsc > tsc_timestamp) { 3131 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); 3132 tsc_timestamp = tsc; 3133 } 3134 } 3135 3136 local_irq_restore(flags); 3137 3138 /* With all the info we got, fill in the values */ 3139 3140 if (kvm_caps.has_tsc_control) 3141 tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz, 3142 v->arch.l1_tsc_scaling_ratio); 3143 3144 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { 3145 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, 3146 &vcpu->hv_clock.tsc_shift, 3147 &vcpu->hv_clock.tsc_to_system_mul); 3148 vcpu->hw_tsc_khz = tgt_tsc_khz; 3149 } 3150 3151 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; 3152 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; 3153 vcpu->last_guest_tsc = tsc_timestamp; 3154 3155 /* If the host uses TSC clocksource, then it is stable */ 3156 pvclock_flags = 0; 3157 if (use_master_clock) 3158 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; 3159 3160 vcpu->hv_clock.flags = pvclock_flags; 3161 3162 if (vcpu->pv_time.active) 3163 kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0); 3164 if (vcpu->xen.vcpu_info_cache.active) 3165 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache, 3166 offsetof(struct compat_vcpu_info, time)); 3167 if (vcpu->xen.vcpu_time_info_cache.active) 3168 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0); 3169 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); 3170 return 0; 3171 } 3172 3173 /* 3174 * kvmclock updates which are isolated to a given vcpu, such as 3175 * vcpu->cpu migration, should not allow system_timestamp from 3176 * the rest of the vcpus to remain static. Otherwise ntp frequency 3177 * correction applies to one vcpu's system_timestamp but not 3178 * the others. 3179 * 3180 * So in those cases, request a kvmclock update for all vcpus. 3181 * We need to rate-limit these requests though, as they can 3182 * considerably slow guests that have a large number of vcpus. 3183 * The time for a remote vcpu to update its kvmclock is bound 3184 * by the delay we use to rate-limit the updates. 3185 */ 3186 3187 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) 3188 3189 static void kvmclock_update_fn(struct work_struct *work) 3190 { 3191 unsigned long i; 3192 struct delayed_work *dwork = to_delayed_work(work); 3193 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3194 kvmclock_update_work); 3195 struct kvm *kvm = container_of(ka, struct kvm, arch); 3196 struct kvm_vcpu *vcpu; 3197 3198 kvm_for_each_vcpu(i, vcpu, kvm) { 3199 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3200 kvm_vcpu_kick(vcpu); 3201 } 3202 } 3203 3204 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) 3205 { 3206 struct kvm *kvm = v->kvm; 3207 3208 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3209 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 3210 KVMCLOCK_UPDATE_DELAY); 3211 } 3212 3213 #define KVMCLOCK_SYNC_PERIOD (300 * HZ) 3214 3215 static void kvmclock_sync_fn(struct work_struct *work) 3216 { 3217 struct delayed_work *dwork = to_delayed_work(work); 3218 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3219 kvmclock_sync_work); 3220 struct kvm *kvm = container_of(ka, struct kvm, arch); 3221 3222 if (!kvmclock_periodic_sync) 3223 return; 3224 3225 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); 3226 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 3227 KVMCLOCK_SYNC_PERIOD); 3228 } 3229 3230 /* These helpers are safe iff @msr is known to be an MCx bank MSR. */ 3231 static bool is_mci_control_msr(u32 msr) 3232 { 3233 return (msr & 3) == 0; 3234 } 3235 static bool is_mci_status_msr(u32 msr) 3236 { 3237 return (msr & 3) == 1; 3238 } 3239 3240 /* 3241 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP. 3242 */ 3243 static bool can_set_mci_status(struct kvm_vcpu *vcpu) 3244 { 3245 /* McStatusWrEn enabled? */ 3246 if (guest_cpuid_is_amd_or_hygon(vcpu)) 3247 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); 3248 3249 return false; 3250 } 3251 3252 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3253 { 3254 u64 mcg_cap = vcpu->arch.mcg_cap; 3255 unsigned bank_num = mcg_cap & 0xff; 3256 u32 msr = msr_info->index; 3257 u64 data = msr_info->data; 3258 u32 offset, last_msr; 3259 3260 switch (msr) { 3261 case MSR_IA32_MCG_STATUS: 3262 vcpu->arch.mcg_status = data; 3263 break; 3264 case MSR_IA32_MCG_CTL: 3265 if (!(mcg_cap & MCG_CTL_P) && 3266 (data || !msr_info->host_initiated)) 3267 return 1; 3268 if (data != 0 && data != ~(u64)0) 3269 return 1; 3270 vcpu->arch.mcg_ctl = data; 3271 break; 3272 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3273 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; 3274 if (msr > last_msr) 3275 return 1; 3276 3277 if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated)) 3278 return 1; 3279 /* An attempt to write a 1 to a reserved bit raises #GP */ 3280 if (data & ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK)) 3281 return 1; 3282 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, 3283 last_msr + 1 - MSR_IA32_MC0_CTL2); 3284 vcpu->arch.mci_ctl2_banks[offset] = data; 3285 break; 3286 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3287 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; 3288 if (msr > last_msr) 3289 return 1; 3290 3291 /* 3292 * Only 0 or all 1s can be written to IA32_MCi_CTL, all other 3293 * values are architecturally undefined. But, some Linux 3294 * kernels clear bit 10 in bank 4 to workaround a BIOS/GART TLB 3295 * issue on AMD K8s, allow bit 10 to be clear when setting all 3296 * other bits in order to avoid an uncaught #GP in the guest. 3297 * 3298 * UNIXWARE clears bit 0 of MC1_CTL to ignore correctable, 3299 * single-bit ECC data errors. 3300 */ 3301 if (is_mci_control_msr(msr) && 3302 data != 0 && (data | (1 << 10) | 1) != ~(u64)0) 3303 return 1; 3304 3305 /* 3306 * All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR. 3307 * AMD-based CPUs allow non-zero values, but if and only if 3308 * HWCR[McStatusWrEn] is set. 3309 */ 3310 if (!msr_info->host_initiated && is_mci_status_msr(msr) && 3311 data != 0 && !can_set_mci_status(vcpu)) 3312 return 1; 3313 3314 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, 3315 last_msr + 1 - MSR_IA32_MC0_CTL); 3316 vcpu->arch.mce_banks[offset] = data; 3317 break; 3318 default: 3319 return 1; 3320 } 3321 return 0; 3322 } 3323 3324 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) 3325 { 3326 u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT; 3327 3328 return (vcpu->arch.apf.msr_en_val & mask) == mask; 3329 } 3330 3331 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) 3332 { 3333 gpa_t gpa = data & ~0x3f; 3334 3335 /* Bits 4:5 are reserved, Should be zero */ 3336 if (data & 0x30) 3337 return 1; 3338 3339 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) && 3340 (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT)) 3341 return 1; 3342 3343 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) && 3344 (data & KVM_ASYNC_PF_DELIVERY_AS_INT)) 3345 return 1; 3346 3347 if (!lapic_in_kernel(vcpu)) 3348 return data ? 1 : 0; 3349 3350 vcpu->arch.apf.msr_en_val = data; 3351 3352 if (!kvm_pv_async_pf_enabled(vcpu)) { 3353 kvm_clear_async_pf_completion_queue(vcpu); 3354 kvm_async_pf_hash_reset(vcpu); 3355 return 0; 3356 } 3357 3358 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, 3359 sizeof(u64))) 3360 return 1; 3361 3362 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); 3363 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; 3364 3365 kvm_async_pf_wakeup_all(vcpu); 3366 3367 return 0; 3368 } 3369 3370 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data) 3371 { 3372 /* Bits 8-63 are reserved */ 3373 if (data >> 8) 3374 return 1; 3375 3376 if (!lapic_in_kernel(vcpu)) 3377 return 1; 3378 3379 vcpu->arch.apf.msr_int_val = data; 3380 3381 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; 3382 3383 return 0; 3384 } 3385 3386 static void kvmclock_reset(struct kvm_vcpu *vcpu) 3387 { 3388 kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time); 3389 vcpu->arch.time = 0; 3390 } 3391 3392 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) 3393 { 3394 ++vcpu->stat.tlb_flush; 3395 static_call(kvm_x86_flush_tlb_all)(vcpu); 3396 } 3397 3398 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) 3399 { 3400 ++vcpu->stat.tlb_flush; 3401 3402 if (!tdp_enabled) { 3403 /* 3404 * A TLB flush on behalf of the guest is equivalent to 3405 * INVPCID(all), toggling CR4.PGE, etc., which requires 3406 * a forced sync of the shadow page tables. Ensure all the 3407 * roots are synced and the guest TLB in hardware is clean. 3408 */ 3409 kvm_mmu_sync_roots(vcpu); 3410 kvm_mmu_sync_prev_roots(vcpu); 3411 } 3412 3413 static_call(kvm_x86_flush_tlb_guest)(vcpu); 3414 } 3415 3416 3417 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) 3418 { 3419 ++vcpu->stat.tlb_flush; 3420 static_call(kvm_x86_flush_tlb_current)(vcpu); 3421 } 3422 3423 /* 3424 * Service "local" TLB flush requests, which are specific to the current MMU 3425 * context. In addition to the generic event handling in vcpu_enter_guest(), 3426 * TLB flushes that are targeted at an MMU context also need to be serviced 3427 * prior before nested VM-Enter/VM-Exit. 3428 */ 3429 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu) 3430 { 3431 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 3432 kvm_vcpu_flush_tlb_current(vcpu); 3433 3434 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) 3435 kvm_vcpu_flush_tlb_guest(vcpu); 3436 } 3437 EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests); 3438 3439 static void record_steal_time(struct kvm_vcpu *vcpu) 3440 { 3441 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 3442 struct kvm_steal_time __user *st; 3443 struct kvm_memslots *slots; 3444 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; 3445 u64 steal; 3446 u32 version; 3447 3448 if (kvm_xen_msr_enabled(vcpu->kvm)) { 3449 kvm_xen_runstate_set_running(vcpu); 3450 return; 3451 } 3452 3453 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 3454 return; 3455 3456 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) 3457 return; 3458 3459 slots = kvm_memslots(vcpu->kvm); 3460 3461 if (unlikely(slots->generation != ghc->generation || 3462 gpa != ghc->gpa || 3463 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) { 3464 /* We rely on the fact that it fits in a single page. */ 3465 BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS); 3466 3467 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) || 3468 kvm_is_error_hva(ghc->hva) || !ghc->memslot) 3469 return; 3470 } 3471 3472 st = (struct kvm_steal_time __user *)ghc->hva; 3473 /* 3474 * Doing a TLB flush here, on the guest's behalf, can avoid 3475 * expensive IPIs. 3476 */ 3477 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { 3478 u8 st_preempted = 0; 3479 int err = -EFAULT; 3480 3481 if (!user_access_begin(st, sizeof(*st))) 3482 return; 3483 3484 asm volatile("1: xchgb %0, %2\n" 3485 "xor %1, %1\n" 3486 "2:\n" 3487 _ASM_EXTABLE_UA(1b, 2b) 3488 : "+q" (st_preempted), 3489 "+&r" (err), 3490 "+m" (st->preempted)); 3491 if (err) 3492 goto out; 3493 3494 user_access_end(); 3495 3496 vcpu->arch.st.preempted = 0; 3497 3498 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, 3499 st_preempted & KVM_VCPU_FLUSH_TLB); 3500 if (st_preempted & KVM_VCPU_FLUSH_TLB) 3501 kvm_vcpu_flush_tlb_guest(vcpu); 3502 3503 if (!user_access_begin(st, sizeof(*st))) 3504 goto dirty; 3505 } else { 3506 if (!user_access_begin(st, sizeof(*st))) 3507 return; 3508 3509 unsafe_put_user(0, &st->preempted, out); 3510 vcpu->arch.st.preempted = 0; 3511 } 3512 3513 unsafe_get_user(version, &st->version, out); 3514 if (version & 1) 3515 version += 1; /* first time write, random junk */ 3516 3517 version += 1; 3518 unsafe_put_user(version, &st->version, out); 3519 3520 smp_wmb(); 3521 3522 unsafe_get_user(steal, &st->steal, out); 3523 steal += current->sched_info.run_delay - 3524 vcpu->arch.st.last_steal; 3525 vcpu->arch.st.last_steal = current->sched_info.run_delay; 3526 unsafe_put_user(steal, &st->steal, out); 3527 3528 version += 1; 3529 unsafe_put_user(version, &st->version, out); 3530 3531 out: 3532 user_access_end(); 3533 dirty: 3534 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 3535 } 3536 3537 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3538 { 3539 bool pr = false; 3540 u32 msr = msr_info->index; 3541 u64 data = msr_info->data; 3542 3543 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) 3544 return kvm_xen_write_hypercall_page(vcpu, data); 3545 3546 switch (msr) { 3547 case MSR_AMD64_NB_CFG: 3548 case MSR_IA32_UCODE_WRITE: 3549 case MSR_VM_HSAVE_PA: 3550 case MSR_AMD64_PATCH_LOADER: 3551 case MSR_AMD64_BU_CFG2: 3552 case MSR_AMD64_DC_CFG: 3553 case MSR_F15H_EX_CFG: 3554 break; 3555 3556 case MSR_IA32_UCODE_REV: 3557 if (msr_info->host_initiated) 3558 vcpu->arch.microcode_version = data; 3559 break; 3560 case MSR_IA32_ARCH_CAPABILITIES: 3561 if (!msr_info->host_initiated) 3562 return 1; 3563 vcpu->arch.arch_capabilities = data; 3564 break; 3565 case MSR_IA32_PERF_CAPABILITIES: 3566 if (!msr_info->host_initiated) 3567 return 1; 3568 if (data & ~kvm_caps.supported_perf_cap) 3569 return 1; 3570 3571 vcpu->arch.perf_capabilities = data; 3572 kvm_pmu_refresh(vcpu); 3573 return 0; 3574 case MSR_EFER: 3575 return set_efer(vcpu, msr_info); 3576 case MSR_K7_HWCR: 3577 data &= ~(u64)0x40; /* ignore flush filter disable */ 3578 data &= ~(u64)0x100; /* ignore ignne emulation enable */ 3579 data &= ~(u64)0x8; /* ignore TLB cache disable */ 3580 3581 /* Handle McStatusWrEn */ 3582 if (data == BIT_ULL(18)) { 3583 vcpu->arch.msr_hwcr = data; 3584 } else if (data != 0) { 3585 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", 3586 data); 3587 return 1; 3588 } 3589 break; 3590 case MSR_FAM10H_MMIO_CONF_BASE: 3591 if (data != 0) { 3592 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " 3593 "0x%llx\n", data); 3594 return 1; 3595 } 3596 break; 3597 case 0x200 ... MSR_IA32_MC0_CTL2 - 1: 3598 case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff: 3599 return kvm_mtrr_set_msr(vcpu, msr, data); 3600 case MSR_IA32_APICBASE: 3601 return kvm_set_apic_base(vcpu, msr_info); 3602 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: 3603 return kvm_x2apic_msr_write(vcpu, msr, data); 3604 case MSR_IA32_TSC_DEADLINE: 3605 kvm_set_lapic_tscdeadline_msr(vcpu, data); 3606 break; 3607 case MSR_IA32_TSC_ADJUST: 3608 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) { 3609 if (!msr_info->host_initiated) { 3610 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; 3611 adjust_tsc_offset_guest(vcpu, adj); 3612 /* Before back to guest, tsc_timestamp must be adjusted 3613 * as well, otherwise guest's percpu pvclock time could jump. 3614 */ 3615 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3616 } 3617 vcpu->arch.ia32_tsc_adjust_msr = data; 3618 } 3619 break; 3620 case MSR_IA32_MISC_ENABLE: { 3621 u64 old_val = vcpu->arch.ia32_misc_enable_msr; 3622 3623 if (!msr_info->host_initiated) { 3624 /* RO bits */ 3625 if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK) 3626 return 1; 3627 3628 /* R bits, i.e. writes are ignored, but don't fault. */ 3629 data = data & ~MSR_IA32_MISC_ENABLE_EMON; 3630 data |= old_val & MSR_IA32_MISC_ENABLE_EMON; 3631 } 3632 3633 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && 3634 ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { 3635 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3)) 3636 return 1; 3637 vcpu->arch.ia32_misc_enable_msr = data; 3638 kvm_update_cpuid_runtime(vcpu); 3639 } else { 3640 vcpu->arch.ia32_misc_enable_msr = data; 3641 } 3642 break; 3643 } 3644 case MSR_IA32_SMBASE: 3645 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) 3646 return 1; 3647 vcpu->arch.smbase = data; 3648 break; 3649 case MSR_IA32_POWER_CTL: 3650 vcpu->arch.msr_ia32_power_ctl = data; 3651 break; 3652 case MSR_IA32_TSC: 3653 if (msr_info->host_initiated) { 3654 kvm_synchronize_tsc(vcpu, data); 3655 } else { 3656 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; 3657 adjust_tsc_offset_guest(vcpu, adj); 3658 vcpu->arch.ia32_tsc_adjust_msr += adj; 3659 } 3660 break; 3661 case MSR_IA32_XSS: 3662 if (!msr_info->host_initiated && 3663 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 3664 return 1; 3665 /* 3666 * KVM supports exposing PT to the guest, but does not support 3667 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than 3668 * XSAVES/XRSTORS to save/restore PT MSRs. 3669 */ 3670 if (data & ~kvm_caps.supported_xss) 3671 return 1; 3672 vcpu->arch.ia32_xss = data; 3673 kvm_update_cpuid_runtime(vcpu); 3674 break; 3675 case MSR_SMI_COUNT: 3676 if (!msr_info->host_initiated) 3677 return 1; 3678 vcpu->arch.smi_count = data; 3679 break; 3680 case MSR_KVM_WALL_CLOCK_NEW: 3681 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3682 return 1; 3683 3684 vcpu->kvm->arch.wall_clock = data; 3685 kvm_write_wall_clock(vcpu->kvm, data, 0); 3686 break; 3687 case MSR_KVM_WALL_CLOCK: 3688 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3689 return 1; 3690 3691 vcpu->kvm->arch.wall_clock = data; 3692 kvm_write_wall_clock(vcpu->kvm, data, 0); 3693 break; 3694 case MSR_KVM_SYSTEM_TIME_NEW: 3695 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3696 return 1; 3697 3698 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); 3699 break; 3700 case MSR_KVM_SYSTEM_TIME: 3701 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3702 return 1; 3703 3704 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); 3705 break; 3706 case MSR_KVM_ASYNC_PF_EN: 3707 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 3708 return 1; 3709 3710 if (kvm_pv_enable_async_pf(vcpu, data)) 3711 return 1; 3712 break; 3713 case MSR_KVM_ASYNC_PF_INT: 3714 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3715 return 1; 3716 3717 if (kvm_pv_enable_async_pf_int(vcpu, data)) 3718 return 1; 3719 break; 3720 case MSR_KVM_ASYNC_PF_ACK: 3721 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3722 return 1; 3723 if (data & 0x1) { 3724 vcpu->arch.apf.pageready_pending = false; 3725 kvm_check_async_pf_completion(vcpu); 3726 } 3727 break; 3728 case MSR_KVM_STEAL_TIME: 3729 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 3730 return 1; 3731 3732 if (unlikely(!sched_info_on())) 3733 return 1; 3734 3735 if (data & KVM_STEAL_RESERVED_MASK) 3736 return 1; 3737 3738 vcpu->arch.st.msr_val = data; 3739 3740 if (!(data & KVM_MSR_ENABLED)) 3741 break; 3742 3743 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 3744 3745 break; 3746 case MSR_KVM_PV_EOI_EN: 3747 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 3748 return 1; 3749 3750 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8))) 3751 return 1; 3752 break; 3753 3754 case MSR_KVM_POLL_CONTROL: 3755 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 3756 return 1; 3757 3758 /* only enable bit supported */ 3759 if (data & (-1ULL << 1)) 3760 return 1; 3761 3762 vcpu->arch.msr_kvm_poll_control = data; 3763 break; 3764 3765 case MSR_IA32_MCG_CTL: 3766 case MSR_IA32_MCG_STATUS: 3767 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3768 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3769 return set_msr_mce(vcpu, msr_info); 3770 3771 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3772 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3773 pr = true; 3774 fallthrough; 3775 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3776 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3777 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3778 return kvm_pmu_set_msr(vcpu, msr_info); 3779 3780 if (pr || data != 0) 3781 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " 3782 "0x%x data 0x%llx\n", msr, data); 3783 break; 3784 case MSR_K7_CLK_CTL: 3785 /* 3786 * Ignore all writes to this no longer documented MSR. 3787 * Writes are only relevant for old K7 processors, 3788 * all pre-dating SVM, but a recommended workaround from 3789 * AMD for these chips. It is possible to specify the 3790 * affected processor models on the command line, hence 3791 * the need to ignore the workaround. 3792 */ 3793 break; 3794 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 3795 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 3796 case HV_X64_MSR_SYNDBG_OPTIONS: 3797 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 3798 case HV_X64_MSR_CRASH_CTL: 3799 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 3800 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 3801 case HV_X64_MSR_TSC_EMULATION_CONTROL: 3802 case HV_X64_MSR_TSC_EMULATION_STATUS: 3803 return kvm_hv_set_msr_common(vcpu, msr, data, 3804 msr_info->host_initiated); 3805 case MSR_IA32_BBL_CR_CTL3: 3806 /* Drop writes to this legacy MSR -- see rdmsr 3807 * counterpart for further detail. 3808 */ 3809 if (report_ignored_msrs) 3810 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", 3811 msr, data); 3812 break; 3813 case MSR_AMD64_OSVW_ID_LENGTH: 3814 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3815 return 1; 3816 vcpu->arch.osvw.length = data; 3817 break; 3818 case MSR_AMD64_OSVW_STATUS: 3819 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3820 return 1; 3821 vcpu->arch.osvw.status = data; 3822 break; 3823 case MSR_PLATFORM_INFO: 3824 if (!msr_info->host_initiated || 3825 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) && 3826 cpuid_fault_enabled(vcpu))) 3827 return 1; 3828 vcpu->arch.msr_platform_info = data; 3829 break; 3830 case MSR_MISC_FEATURES_ENABLES: 3831 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT || 3832 (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT && 3833 !supports_cpuid_fault(vcpu))) 3834 return 1; 3835 vcpu->arch.msr_misc_features_enables = data; 3836 break; 3837 #ifdef CONFIG_X86_64 3838 case MSR_IA32_XFD: 3839 if (!msr_info->host_initiated && 3840 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 3841 return 1; 3842 3843 if (data & ~kvm_guest_supported_xfd(vcpu)) 3844 return 1; 3845 3846 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); 3847 break; 3848 case MSR_IA32_XFD_ERR: 3849 if (!msr_info->host_initiated && 3850 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 3851 return 1; 3852 3853 if (data & ~kvm_guest_supported_xfd(vcpu)) 3854 return 1; 3855 3856 vcpu->arch.guest_fpu.xfd_err = data; 3857 break; 3858 #endif 3859 case MSR_IA32_PEBS_ENABLE: 3860 case MSR_IA32_DS_AREA: 3861 case MSR_PEBS_DATA_CFG: 3862 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: 3863 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3864 return kvm_pmu_set_msr(vcpu, msr_info); 3865 /* 3866 * Userspace is allowed to write '0' to MSRs that KVM reports 3867 * as to-be-saved, even if an MSRs isn't fully supported. 3868 */ 3869 return !msr_info->host_initiated || data; 3870 default: 3871 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3872 return kvm_pmu_set_msr(vcpu, msr_info); 3873 return KVM_MSR_RET_INVALID; 3874 } 3875 return 0; 3876 } 3877 EXPORT_SYMBOL_GPL(kvm_set_msr_common); 3878 3879 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) 3880 { 3881 u64 data; 3882 u64 mcg_cap = vcpu->arch.mcg_cap; 3883 unsigned bank_num = mcg_cap & 0xff; 3884 u32 offset, last_msr; 3885 3886 switch (msr) { 3887 case MSR_IA32_P5_MC_ADDR: 3888 case MSR_IA32_P5_MC_TYPE: 3889 data = 0; 3890 break; 3891 case MSR_IA32_MCG_CAP: 3892 data = vcpu->arch.mcg_cap; 3893 break; 3894 case MSR_IA32_MCG_CTL: 3895 if (!(mcg_cap & MCG_CTL_P) && !host) 3896 return 1; 3897 data = vcpu->arch.mcg_ctl; 3898 break; 3899 case MSR_IA32_MCG_STATUS: 3900 data = vcpu->arch.mcg_status; 3901 break; 3902 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3903 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; 3904 if (msr > last_msr) 3905 return 1; 3906 3907 if (!(mcg_cap & MCG_CMCI_P) && !host) 3908 return 1; 3909 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, 3910 last_msr + 1 - MSR_IA32_MC0_CTL2); 3911 data = vcpu->arch.mci_ctl2_banks[offset]; 3912 break; 3913 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3914 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; 3915 if (msr > last_msr) 3916 return 1; 3917 3918 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, 3919 last_msr + 1 - MSR_IA32_MC0_CTL); 3920 data = vcpu->arch.mce_banks[offset]; 3921 break; 3922 default: 3923 return 1; 3924 } 3925 *pdata = data; 3926 return 0; 3927 } 3928 3929 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3930 { 3931 switch (msr_info->index) { 3932 case MSR_IA32_PLATFORM_ID: 3933 case MSR_IA32_EBL_CR_POWERON: 3934 case MSR_IA32_LASTBRANCHFROMIP: 3935 case MSR_IA32_LASTBRANCHTOIP: 3936 case MSR_IA32_LASTINTFROMIP: 3937 case MSR_IA32_LASTINTTOIP: 3938 case MSR_AMD64_SYSCFG: 3939 case MSR_K8_TSEG_ADDR: 3940 case MSR_K8_TSEG_MASK: 3941 case MSR_VM_HSAVE_PA: 3942 case MSR_K8_INT_PENDING_MSG: 3943 case MSR_AMD64_NB_CFG: 3944 case MSR_FAM10H_MMIO_CONF_BASE: 3945 case MSR_AMD64_BU_CFG2: 3946 case MSR_IA32_PERF_CTL: 3947 case MSR_AMD64_DC_CFG: 3948 case MSR_F15H_EX_CFG: 3949 /* 3950 * Intel Sandy Bridge CPUs must support the RAPL (running average power 3951 * limit) MSRs. Just return 0, as we do not want to expose the host 3952 * data here. Do not conditionalize this on CPUID, as KVM does not do 3953 * so for existing CPU-specific MSRs. 3954 */ 3955 case MSR_RAPL_POWER_UNIT: 3956 case MSR_PP0_ENERGY_STATUS: /* Power plane 0 (core) */ 3957 case MSR_PP1_ENERGY_STATUS: /* Power plane 1 (graphics uncore) */ 3958 case MSR_PKG_ENERGY_STATUS: /* Total package */ 3959 case MSR_DRAM_ENERGY_STATUS: /* DRAM controller */ 3960 msr_info->data = 0; 3961 break; 3962 case MSR_IA32_PEBS_ENABLE: 3963 case MSR_IA32_DS_AREA: 3964 case MSR_PEBS_DATA_CFG: 3965 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: 3966 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3967 return kvm_pmu_get_msr(vcpu, msr_info); 3968 /* 3969 * Userspace is allowed to read MSRs that KVM reports as 3970 * to-be-saved, even if an MSR isn't fully supported. 3971 */ 3972 if (!msr_info->host_initiated) 3973 return 1; 3974 msr_info->data = 0; 3975 break; 3976 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3977 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3978 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3979 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3980 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3981 return kvm_pmu_get_msr(vcpu, msr_info); 3982 msr_info->data = 0; 3983 break; 3984 case MSR_IA32_UCODE_REV: 3985 msr_info->data = vcpu->arch.microcode_version; 3986 break; 3987 case MSR_IA32_ARCH_CAPABILITIES: 3988 if (!msr_info->host_initiated && 3989 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) 3990 return 1; 3991 msr_info->data = vcpu->arch.arch_capabilities; 3992 break; 3993 case MSR_IA32_PERF_CAPABILITIES: 3994 if (!msr_info->host_initiated && 3995 !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) 3996 return 1; 3997 msr_info->data = vcpu->arch.perf_capabilities; 3998 break; 3999 case MSR_IA32_POWER_CTL: 4000 msr_info->data = vcpu->arch.msr_ia32_power_ctl; 4001 break; 4002 case MSR_IA32_TSC: { 4003 /* 4004 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset 4005 * even when not intercepted. AMD manual doesn't explicitly 4006 * state this but appears to behave the same. 4007 * 4008 * On userspace reads and writes, however, we unconditionally 4009 * return L1's TSC value to ensure backwards-compatible 4010 * behavior for migration. 4011 */ 4012 u64 offset, ratio; 4013 4014 if (msr_info->host_initiated) { 4015 offset = vcpu->arch.l1_tsc_offset; 4016 ratio = vcpu->arch.l1_tsc_scaling_ratio; 4017 } else { 4018 offset = vcpu->arch.tsc_offset; 4019 ratio = vcpu->arch.tsc_scaling_ratio; 4020 } 4021 4022 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset; 4023 break; 4024 } 4025 case MSR_MTRRcap: 4026 case 0x200 ... MSR_IA32_MC0_CTL2 - 1: 4027 case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff: 4028 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); 4029 case 0xcd: /* fsb frequency */ 4030 msr_info->data = 3; 4031 break; 4032 /* 4033 * MSR_EBC_FREQUENCY_ID 4034 * Conservative value valid for even the basic CPU models. 4035 * Models 0,1: 000 in bits 23:21 indicating a bus speed of 4036 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, 4037 * and 266MHz for model 3, or 4. Set Core Clock 4038 * Frequency to System Bus Frequency Ratio to 1 (bits 4039 * 31:24) even though these are only valid for CPU 4040 * models > 2, however guests may end up dividing or 4041 * multiplying by zero otherwise. 4042 */ 4043 case MSR_EBC_FREQUENCY_ID: 4044 msr_info->data = 1 << 24; 4045 break; 4046 case MSR_IA32_APICBASE: 4047 msr_info->data = kvm_get_apic_base(vcpu); 4048 break; 4049 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: 4050 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); 4051 case MSR_IA32_TSC_DEADLINE: 4052 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); 4053 break; 4054 case MSR_IA32_TSC_ADJUST: 4055 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; 4056 break; 4057 case MSR_IA32_MISC_ENABLE: 4058 msr_info->data = vcpu->arch.ia32_misc_enable_msr; 4059 break; 4060 case MSR_IA32_SMBASE: 4061 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) 4062 return 1; 4063 msr_info->data = vcpu->arch.smbase; 4064 break; 4065 case MSR_SMI_COUNT: 4066 msr_info->data = vcpu->arch.smi_count; 4067 break; 4068 case MSR_IA32_PERF_STATUS: 4069 /* TSC increment by tick */ 4070 msr_info->data = 1000ULL; 4071 /* CPU multiplier */ 4072 msr_info->data |= (((uint64_t)4ULL) << 40); 4073 break; 4074 case MSR_EFER: 4075 msr_info->data = vcpu->arch.efer; 4076 break; 4077 case MSR_KVM_WALL_CLOCK: 4078 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4079 return 1; 4080 4081 msr_info->data = vcpu->kvm->arch.wall_clock; 4082 break; 4083 case MSR_KVM_WALL_CLOCK_NEW: 4084 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4085 return 1; 4086 4087 msr_info->data = vcpu->kvm->arch.wall_clock; 4088 break; 4089 case MSR_KVM_SYSTEM_TIME: 4090 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4091 return 1; 4092 4093 msr_info->data = vcpu->arch.time; 4094 break; 4095 case MSR_KVM_SYSTEM_TIME_NEW: 4096 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4097 return 1; 4098 4099 msr_info->data = vcpu->arch.time; 4100 break; 4101 case MSR_KVM_ASYNC_PF_EN: 4102 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 4103 return 1; 4104 4105 msr_info->data = vcpu->arch.apf.msr_en_val; 4106 break; 4107 case MSR_KVM_ASYNC_PF_INT: 4108 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4109 return 1; 4110 4111 msr_info->data = vcpu->arch.apf.msr_int_val; 4112 break; 4113 case MSR_KVM_ASYNC_PF_ACK: 4114 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4115 return 1; 4116 4117 msr_info->data = 0; 4118 break; 4119 case MSR_KVM_STEAL_TIME: 4120 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 4121 return 1; 4122 4123 msr_info->data = vcpu->arch.st.msr_val; 4124 break; 4125 case MSR_KVM_PV_EOI_EN: 4126 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 4127 return 1; 4128 4129 msr_info->data = vcpu->arch.pv_eoi.msr_val; 4130 break; 4131 case MSR_KVM_POLL_CONTROL: 4132 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 4133 return 1; 4134 4135 msr_info->data = vcpu->arch.msr_kvm_poll_control; 4136 break; 4137 case MSR_IA32_P5_MC_ADDR: 4138 case MSR_IA32_P5_MC_TYPE: 4139 case MSR_IA32_MCG_CAP: 4140 case MSR_IA32_MCG_CTL: 4141 case MSR_IA32_MCG_STATUS: 4142 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 4143 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 4144 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, 4145 msr_info->host_initiated); 4146 case MSR_IA32_XSS: 4147 if (!msr_info->host_initiated && 4148 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 4149 return 1; 4150 msr_info->data = vcpu->arch.ia32_xss; 4151 break; 4152 case MSR_K7_CLK_CTL: 4153 /* 4154 * Provide expected ramp-up count for K7. All other 4155 * are set to zero, indicating minimum divisors for 4156 * every field. 4157 * 4158 * This prevents guest kernels on AMD host with CPU 4159 * type 6, model 8 and higher from exploding due to 4160 * the rdmsr failing. 4161 */ 4162 msr_info->data = 0x20000000; 4163 break; 4164 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 4165 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 4166 case HV_X64_MSR_SYNDBG_OPTIONS: 4167 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 4168 case HV_X64_MSR_CRASH_CTL: 4169 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 4170 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 4171 case HV_X64_MSR_TSC_EMULATION_CONTROL: 4172 case HV_X64_MSR_TSC_EMULATION_STATUS: 4173 return kvm_hv_get_msr_common(vcpu, 4174 msr_info->index, &msr_info->data, 4175 msr_info->host_initiated); 4176 case MSR_IA32_BBL_CR_CTL3: 4177 /* This legacy MSR exists but isn't fully documented in current 4178 * silicon. It is however accessed by winxp in very narrow 4179 * scenarios where it sets bit #19, itself documented as 4180 * a "reserved" bit. Best effort attempt to source coherent 4181 * read data here should the balance of the register be 4182 * interpreted by the guest: 4183 * 4184 * L2 cache control register 3: 64GB range, 256KB size, 4185 * enabled, latency 0x1, configured 4186 */ 4187 msr_info->data = 0xbe702111; 4188 break; 4189 case MSR_AMD64_OSVW_ID_LENGTH: 4190 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4191 return 1; 4192 msr_info->data = vcpu->arch.osvw.length; 4193 break; 4194 case MSR_AMD64_OSVW_STATUS: 4195 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4196 return 1; 4197 msr_info->data = vcpu->arch.osvw.status; 4198 break; 4199 case MSR_PLATFORM_INFO: 4200 if (!msr_info->host_initiated && 4201 !vcpu->kvm->arch.guest_can_read_msr_platform_info) 4202 return 1; 4203 msr_info->data = vcpu->arch.msr_platform_info; 4204 break; 4205 case MSR_MISC_FEATURES_ENABLES: 4206 msr_info->data = vcpu->arch.msr_misc_features_enables; 4207 break; 4208 case MSR_K7_HWCR: 4209 msr_info->data = vcpu->arch.msr_hwcr; 4210 break; 4211 #ifdef CONFIG_X86_64 4212 case MSR_IA32_XFD: 4213 if (!msr_info->host_initiated && 4214 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4215 return 1; 4216 4217 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; 4218 break; 4219 case MSR_IA32_XFD_ERR: 4220 if (!msr_info->host_initiated && 4221 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4222 return 1; 4223 4224 msr_info->data = vcpu->arch.guest_fpu.xfd_err; 4225 break; 4226 #endif 4227 default: 4228 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 4229 return kvm_pmu_get_msr(vcpu, msr_info); 4230 return KVM_MSR_RET_INVALID; 4231 } 4232 return 0; 4233 } 4234 EXPORT_SYMBOL_GPL(kvm_get_msr_common); 4235 4236 /* 4237 * Read or write a bunch of msrs. All parameters are kernel addresses. 4238 * 4239 * @return number of msrs set successfully. 4240 */ 4241 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, 4242 struct kvm_msr_entry *entries, 4243 int (*do_msr)(struct kvm_vcpu *vcpu, 4244 unsigned index, u64 *data)) 4245 { 4246 int i; 4247 4248 for (i = 0; i < msrs->nmsrs; ++i) 4249 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 4250 break; 4251 4252 return i; 4253 } 4254 4255 /* 4256 * Read or write a bunch of msrs. Parameters are user addresses. 4257 * 4258 * @return number of msrs set successfully. 4259 */ 4260 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, 4261 int (*do_msr)(struct kvm_vcpu *vcpu, 4262 unsigned index, u64 *data), 4263 int writeback) 4264 { 4265 struct kvm_msrs msrs; 4266 struct kvm_msr_entry *entries; 4267 int r, n; 4268 unsigned size; 4269 4270 r = -EFAULT; 4271 if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) 4272 goto out; 4273 4274 r = -E2BIG; 4275 if (msrs.nmsrs >= MAX_IO_MSRS) 4276 goto out; 4277 4278 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; 4279 entries = memdup_user(user_msrs->entries, size); 4280 if (IS_ERR(entries)) { 4281 r = PTR_ERR(entries); 4282 goto out; 4283 } 4284 4285 r = n = __msr_io(vcpu, &msrs, entries, do_msr); 4286 if (r < 0) 4287 goto out_free; 4288 4289 r = -EFAULT; 4290 if (writeback && copy_to_user(user_msrs->entries, entries, size)) 4291 goto out_free; 4292 4293 r = n; 4294 4295 out_free: 4296 kfree(entries); 4297 out: 4298 return r; 4299 } 4300 4301 static inline bool kvm_can_mwait_in_guest(void) 4302 { 4303 return boot_cpu_has(X86_FEATURE_MWAIT) && 4304 !boot_cpu_has_bug(X86_BUG_MONITOR) && 4305 boot_cpu_has(X86_FEATURE_ARAT); 4306 } 4307 4308 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, 4309 struct kvm_cpuid2 __user *cpuid_arg) 4310 { 4311 struct kvm_cpuid2 cpuid; 4312 int r; 4313 4314 r = -EFAULT; 4315 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4316 return r; 4317 4318 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); 4319 if (r) 4320 return r; 4321 4322 r = -EFAULT; 4323 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 4324 return r; 4325 4326 return 0; 4327 } 4328 4329 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 4330 { 4331 int r = 0; 4332 4333 switch (ext) { 4334 case KVM_CAP_IRQCHIP: 4335 case KVM_CAP_HLT: 4336 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 4337 case KVM_CAP_SET_TSS_ADDR: 4338 case KVM_CAP_EXT_CPUID: 4339 case KVM_CAP_EXT_EMUL_CPUID: 4340 case KVM_CAP_CLOCKSOURCE: 4341 case KVM_CAP_PIT: 4342 case KVM_CAP_NOP_IO_DELAY: 4343 case KVM_CAP_MP_STATE: 4344 case KVM_CAP_SYNC_MMU: 4345 case KVM_CAP_USER_NMI: 4346 case KVM_CAP_REINJECT_CONTROL: 4347 case KVM_CAP_IRQ_INJECT_STATUS: 4348 case KVM_CAP_IOEVENTFD: 4349 case KVM_CAP_IOEVENTFD_NO_LENGTH: 4350 case KVM_CAP_PIT2: 4351 case KVM_CAP_PIT_STATE2: 4352 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 4353 case KVM_CAP_VCPU_EVENTS: 4354 case KVM_CAP_HYPERV: 4355 case KVM_CAP_HYPERV_VAPIC: 4356 case KVM_CAP_HYPERV_SPIN: 4357 case KVM_CAP_HYPERV_SYNIC: 4358 case KVM_CAP_HYPERV_SYNIC2: 4359 case KVM_CAP_HYPERV_VP_INDEX: 4360 case KVM_CAP_HYPERV_EVENTFD: 4361 case KVM_CAP_HYPERV_TLBFLUSH: 4362 case KVM_CAP_HYPERV_SEND_IPI: 4363 case KVM_CAP_HYPERV_CPUID: 4364 case KVM_CAP_HYPERV_ENFORCE_CPUID: 4365 case KVM_CAP_SYS_HYPERV_CPUID: 4366 case KVM_CAP_PCI_SEGMENT: 4367 case KVM_CAP_DEBUGREGS: 4368 case KVM_CAP_X86_ROBUST_SINGLESTEP: 4369 case KVM_CAP_XSAVE: 4370 case KVM_CAP_ASYNC_PF: 4371 case KVM_CAP_ASYNC_PF_INT: 4372 case KVM_CAP_GET_TSC_KHZ: 4373 case KVM_CAP_KVMCLOCK_CTRL: 4374 case KVM_CAP_READONLY_MEM: 4375 case KVM_CAP_HYPERV_TIME: 4376 case KVM_CAP_IOAPIC_POLARITY_IGNORED: 4377 case KVM_CAP_TSC_DEADLINE_TIMER: 4378 case KVM_CAP_DISABLE_QUIRKS: 4379 case KVM_CAP_SET_BOOT_CPU_ID: 4380 case KVM_CAP_SPLIT_IRQCHIP: 4381 case KVM_CAP_IMMEDIATE_EXIT: 4382 case KVM_CAP_PMU_EVENT_FILTER: 4383 case KVM_CAP_GET_MSR_FEATURES: 4384 case KVM_CAP_MSR_PLATFORM_INFO: 4385 case KVM_CAP_EXCEPTION_PAYLOAD: 4386 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 4387 case KVM_CAP_SET_GUEST_DEBUG: 4388 case KVM_CAP_LAST_CPU: 4389 case KVM_CAP_X86_USER_SPACE_MSR: 4390 case KVM_CAP_X86_MSR_FILTER: 4391 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 4392 #ifdef CONFIG_X86_SGX_KVM 4393 case KVM_CAP_SGX_ATTRIBUTE: 4394 #endif 4395 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 4396 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 4397 case KVM_CAP_SREGS2: 4398 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 4399 case KVM_CAP_VCPU_ATTRIBUTES: 4400 case KVM_CAP_SYS_ATTRIBUTES: 4401 case KVM_CAP_VAPIC: 4402 case KVM_CAP_ENABLE_CAP: 4403 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 4404 r = 1; 4405 break; 4406 case KVM_CAP_EXIT_HYPERCALL: 4407 r = KVM_EXIT_HYPERCALL_VALID_MASK; 4408 break; 4409 case KVM_CAP_SET_GUEST_DEBUG2: 4410 return KVM_GUESTDBG_VALID_MASK; 4411 #ifdef CONFIG_KVM_XEN 4412 case KVM_CAP_XEN_HVM: 4413 r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | 4414 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | 4415 KVM_XEN_HVM_CONFIG_SHARED_INFO | 4416 KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL | 4417 KVM_XEN_HVM_CONFIG_EVTCHN_SEND; 4418 if (sched_info_on()) 4419 r |= KVM_XEN_HVM_CONFIG_RUNSTATE; 4420 break; 4421 #endif 4422 case KVM_CAP_SYNC_REGS: 4423 r = KVM_SYNC_X86_VALID_FIELDS; 4424 break; 4425 case KVM_CAP_ADJUST_CLOCK: 4426 r = KVM_CLOCK_VALID_FLAGS; 4427 break; 4428 case KVM_CAP_X86_DISABLE_EXITS: 4429 r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE | 4430 KVM_X86_DISABLE_EXITS_CSTATE; 4431 if(kvm_can_mwait_in_guest()) 4432 r |= KVM_X86_DISABLE_EXITS_MWAIT; 4433 break; 4434 case KVM_CAP_X86_SMM: 4435 if (!IS_ENABLED(CONFIG_KVM_SMM)) 4436 break; 4437 4438 /* SMBASE is usually relocated above 1M on modern chipsets, 4439 * and SMM handlers might indeed rely on 4G segment limits, 4440 * so do not report SMM to be available if real mode is 4441 * emulated via vm86 mode. Still, do not go to great lengths 4442 * to avoid userspace's usage of the feature, because it is a 4443 * fringe case that is not enabled except via specific settings 4444 * of the module parameters. 4445 */ 4446 r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); 4447 break; 4448 case KVM_CAP_NR_VCPUS: 4449 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); 4450 break; 4451 case KVM_CAP_MAX_VCPUS: 4452 r = KVM_MAX_VCPUS; 4453 break; 4454 case KVM_CAP_MAX_VCPU_ID: 4455 r = KVM_MAX_VCPU_IDS; 4456 break; 4457 case KVM_CAP_PV_MMU: /* obsolete */ 4458 r = 0; 4459 break; 4460 case KVM_CAP_MCE: 4461 r = KVM_MAX_MCE_BANKS; 4462 break; 4463 case KVM_CAP_XCRS: 4464 r = boot_cpu_has(X86_FEATURE_XSAVE); 4465 break; 4466 case KVM_CAP_TSC_CONTROL: 4467 case KVM_CAP_VM_TSC_CONTROL: 4468 r = kvm_caps.has_tsc_control; 4469 break; 4470 case KVM_CAP_X2APIC_API: 4471 r = KVM_X2APIC_API_VALID_FLAGS; 4472 break; 4473 case KVM_CAP_NESTED_STATE: 4474 r = kvm_x86_ops.nested_ops->get_state ? 4475 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; 4476 break; 4477 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 4478 r = kvm_x86_ops.enable_direct_tlbflush != NULL; 4479 break; 4480 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 4481 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; 4482 break; 4483 case KVM_CAP_SMALLER_MAXPHYADDR: 4484 r = (int) allow_smaller_maxphyaddr; 4485 break; 4486 case KVM_CAP_STEAL_TIME: 4487 r = sched_info_on(); 4488 break; 4489 case KVM_CAP_X86_BUS_LOCK_EXIT: 4490 if (kvm_caps.has_bus_lock_exit) 4491 r = KVM_BUS_LOCK_DETECTION_OFF | 4492 KVM_BUS_LOCK_DETECTION_EXIT; 4493 else 4494 r = 0; 4495 break; 4496 case KVM_CAP_XSAVE2: { 4497 u64 guest_perm = xstate_get_guest_group_perm(); 4498 4499 r = xstate_required_size(kvm_caps.supported_xcr0 & guest_perm, false); 4500 if (r < sizeof(struct kvm_xsave)) 4501 r = sizeof(struct kvm_xsave); 4502 break; 4503 } 4504 case KVM_CAP_PMU_CAPABILITY: 4505 r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0; 4506 break; 4507 case KVM_CAP_DISABLE_QUIRKS2: 4508 r = KVM_X86_VALID_QUIRKS; 4509 break; 4510 case KVM_CAP_X86_NOTIFY_VMEXIT: 4511 r = kvm_caps.has_notify_vmexit; 4512 break; 4513 default: 4514 break; 4515 } 4516 return r; 4517 } 4518 4519 static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr) 4520 { 4521 void __user *uaddr = (void __user*)(unsigned long)attr->addr; 4522 4523 if ((u64)(unsigned long)uaddr != attr->addr) 4524 return ERR_PTR_USR(-EFAULT); 4525 return uaddr; 4526 } 4527 4528 static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr) 4529 { 4530 u64 __user *uaddr = kvm_get_attr_addr(attr); 4531 4532 if (attr->group) 4533 return -ENXIO; 4534 4535 if (IS_ERR(uaddr)) 4536 return PTR_ERR(uaddr); 4537 4538 switch (attr->attr) { 4539 case KVM_X86_XCOMP_GUEST_SUPP: 4540 if (put_user(kvm_caps.supported_xcr0, uaddr)) 4541 return -EFAULT; 4542 return 0; 4543 default: 4544 return -ENXIO; 4545 break; 4546 } 4547 } 4548 4549 static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr) 4550 { 4551 if (attr->group) 4552 return -ENXIO; 4553 4554 switch (attr->attr) { 4555 case KVM_X86_XCOMP_GUEST_SUPP: 4556 return 0; 4557 default: 4558 return -ENXIO; 4559 } 4560 } 4561 4562 long kvm_arch_dev_ioctl(struct file *filp, 4563 unsigned int ioctl, unsigned long arg) 4564 { 4565 void __user *argp = (void __user *)arg; 4566 long r; 4567 4568 switch (ioctl) { 4569 case KVM_GET_MSR_INDEX_LIST: { 4570 struct kvm_msr_list __user *user_msr_list = argp; 4571 struct kvm_msr_list msr_list; 4572 unsigned n; 4573 4574 r = -EFAULT; 4575 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4576 goto out; 4577 n = msr_list.nmsrs; 4578 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; 4579 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4580 goto out; 4581 r = -E2BIG; 4582 if (n < msr_list.nmsrs) 4583 goto out; 4584 r = -EFAULT; 4585 if (copy_to_user(user_msr_list->indices, &msrs_to_save, 4586 num_msrs_to_save * sizeof(u32))) 4587 goto out; 4588 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, 4589 &emulated_msrs, 4590 num_emulated_msrs * sizeof(u32))) 4591 goto out; 4592 r = 0; 4593 break; 4594 } 4595 case KVM_GET_SUPPORTED_CPUID: 4596 case KVM_GET_EMULATED_CPUID: { 4597 struct kvm_cpuid2 __user *cpuid_arg = argp; 4598 struct kvm_cpuid2 cpuid; 4599 4600 r = -EFAULT; 4601 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4602 goto out; 4603 4604 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, 4605 ioctl); 4606 if (r) 4607 goto out; 4608 4609 r = -EFAULT; 4610 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 4611 goto out; 4612 r = 0; 4613 break; 4614 } 4615 case KVM_X86_GET_MCE_CAP_SUPPORTED: 4616 r = -EFAULT; 4617 if (copy_to_user(argp, &kvm_caps.supported_mce_cap, 4618 sizeof(kvm_caps.supported_mce_cap))) 4619 goto out; 4620 r = 0; 4621 break; 4622 case KVM_GET_MSR_FEATURE_INDEX_LIST: { 4623 struct kvm_msr_list __user *user_msr_list = argp; 4624 struct kvm_msr_list msr_list; 4625 unsigned int n; 4626 4627 r = -EFAULT; 4628 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4629 goto out; 4630 n = msr_list.nmsrs; 4631 msr_list.nmsrs = num_msr_based_features; 4632 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4633 goto out; 4634 r = -E2BIG; 4635 if (n < msr_list.nmsrs) 4636 goto out; 4637 r = -EFAULT; 4638 if (copy_to_user(user_msr_list->indices, &msr_based_features, 4639 num_msr_based_features * sizeof(u32))) 4640 goto out; 4641 r = 0; 4642 break; 4643 } 4644 case KVM_GET_MSRS: 4645 r = msr_io(NULL, argp, do_get_msr_feature, 1); 4646 break; 4647 case KVM_GET_SUPPORTED_HV_CPUID: 4648 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp); 4649 break; 4650 case KVM_GET_DEVICE_ATTR: { 4651 struct kvm_device_attr attr; 4652 r = -EFAULT; 4653 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4654 break; 4655 r = kvm_x86_dev_get_attr(&attr); 4656 break; 4657 } 4658 case KVM_HAS_DEVICE_ATTR: { 4659 struct kvm_device_attr attr; 4660 r = -EFAULT; 4661 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4662 break; 4663 r = kvm_x86_dev_has_attr(&attr); 4664 break; 4665 } 4666 default: 4667 r = -EINVAL; 4668 break; 4669 } 4670 out: 4671 return r; 4672 } 4673 4674 static void wbinvd_ipi(void *garbage) 4675 { 4676 wbinvd(); 4677 } 4678 4679 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) 4680 { 4681 return kvm_arch_has_noncoherent_dma(vcpu->kvm); 4682 } 4683 4684 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 4685 { 4686 /* Address WBINVD may be executed by guest */ 4687 if (need_emulate_wbinvd(vcpu)) { 4688 if (static_call(kvm_x86_has_wbinvd_exit)()) 4689 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 4690 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) 4691 smp_call_function_single(vcpu->cpu, 4692 wbinvd_ipi, NULL, 1); 4693 } 4694 4695 static_call(kvm_x86_vcpu_load)(vcpu, cpu); 4696 4697 /* Save host pkru register if supported */ 4698 vcpu->arch.host_pkru = read_pkru(); 4699 4700 /* Apply any externally detected TSC adjustments (due to suspend) */ 4701 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { 4702 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); 4703 vcpu->arch.tsc_offset_adjustment = 0; 4704 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 4705 } 4706 4707 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { 4708 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : 4709 rdtsc() - vcpu->arch.last_host_tsc; 4710 if (tsc_delta < 0) 4711 mark_tsc_unstable("KVM discovered backwards TSC"); 4712 4713 if (kvm_check_tsc_unstable()) { 4714 u64 offset = kvm_compute_l1_tsc_offset(vcpu, 4715 vcpu->arch.last_guest_tsc); 4716 kvm_vcpu_write_tsc_offset(vcpu, offset); 4717 vcpu->arch.tsc_catchup = 1; 4718 } 4719 4720 if (kvm_lapic_hv_timer_in_use(vcpu)) 4721 kvm_lapic_restart_hv_timer(vcpu); 4722 4723 /* 4724 * On a host with synchronized TSC, there is no need to update 4725 * kvmclock on vcpu->cpu migration 4726 */ 4727 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) 4728 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 4729 if (vcpu->cpu != cpu) 4730 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); 4731 vcpu->cpu = cpu; 4732 } 4733 4734 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 4735 } 4736 4737 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) 4738 { 4739 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 4740 struct kvm_steal_time __user *st; 4741 struct kvm_memslots *slots; 4742 static const u8 preempted = KVM_VCPU_PREEMPTED; 4743 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; 4744 4745 /* 4746 * The vCPU can be marked preempted if and only if the VM-Exit was on 4747 * an instruction boundary and will not trigger guest emulation of any 4748 * kind (see vcpu_run). Vendor specific code controls (conservatively) 4749 * when this is true, for example allowing the vCPU to be marked 4750 * preempted if and only if the VM-Exit was due to a host interrupt. 4751 */ 4752 if (!vcpu->arch.at_instruction_boundary) { 4753 vcpu->stat.preemption_other++; 4754 return; 4755 } 4756 4757 vcpu->stat.preemption_reported++; 4758 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 4759 return; 4760 4761 if (vcpu->arch.st.preempted) 4762 return; 4763 4764 /* This happens on process exit */ 4765 if (unlikely(current->mm != vcpu->kvm->mm)) 4766 return; 4767 4768 slots = kvm_memslots(vcpu->kvm); 4769 4770 if (unlikely(slots->generation != ghc->generation || 4771 gpa != ghc->gpa || 4772 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) 4773 return; 4774 4775 st = (struct kvm_steal_time __user *)ghc->hva; 4776 BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted)); 4777 4778 if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted))) 4779 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; 4780 4781 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 4782 } 4783 4784 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 4785 { 4786 int idx; 4787 4788 if (vcpu->preempted) { 4789 if (!vcpu->arch.guest_state_protected) 4790 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); 4791 4792 /* 4793 * Take the srcu lock as memslots will be accessed to check the gfn 4794 * cache generation against the memslots generation. 4795 */ 4796 idx = srcu_read_lock(&vcpu->kvm->srcu); 4797 if (kvm_xen_msr_enabled(vcpu->kvm)) 4798 kvm_xen_runstate_set_preempted(vcpu); 4799 else 4800 kvm_steal_time_set_preempted(vcpu); 4801 srcu_read_unlock(&vcpu->kvm->srcu, idx); 4802 } 4803 4804 static_call(kvm_x86_vcpu_put)(vcpu); 4805 vcpu->arch.last_host_tsc = rdtsc(); 4806 } 4807 4808 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 4809 struct kvm_lapic_state *s) 4810 { 4811 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 4812 4813 return kvm_apic_get_state(vcpu, s); 4814 } 4815 4816 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, 4817 struct kvm_lapic_state *s) 4818 { 4819 int r; 4820 4821 r = kvm_apic_set_state(vcpu, s); 4822 if (r) 4823 return r; 4824 update_cr8_intercept(vcpu); 4825 4826 return 0; 4827 } 4828 4829 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) 4830 { 4831 /* 4832 * We can accept userspace's request for interrupt injection 4833 * as long as we have a place to store the interrupt number. 4834 * The actual injection will happen when the CPU is able to 4835 * deliver the interrupt. 4836 */ 4837 if (kvm_cpu_has_extint(vcpu)) 4838 return false; 4839 4840 /* Acknowledging ExtINT does not happen if LINT0 is masked. */ 4841 return (!lapic_in_kernel(vcpu) || 4842 kvm_apic_accept_pic_intr(vcpu)); 4843 } 4844 4845 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) 4846 { 4847 /* 4848 * Do not cause an interrupt window exit if an exception 4849 * is pending or an event needs reinjection; userspace 4850 * might want to inject the interrupt manually using KVM_SET_REGS 4851 * or KVM_SET_SREGS. For that to work, we must be at an 4852 * instruction boundary and with no events half-injected. 4853 */ 4854 return (kvm_arch_interrupt_allowed(vcpu) && 4855 kvm_cpu_accept_dm_intr(vcpu) && 4856 !kvm_event_needs_reinjection(vcpu) && 4857 !kvm_is_exception_pending(vcpu)); 4858 } 4859 4860 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 4861 struct kvm_interrupt *irq) 4862 { 4863 if (irq->irq >= KVM_NR_INTERRUPTS) 4864 return -EINVAL; 4865 4866 if (!irqchip_in_kernel(vcpu->kvm)) { 4867 kvm_queue_interrupt(vcpu, irq->irq, false); 4868 kvm_make_request(KVM_REQ_EVENT, vcpu); 4869 return 0; 4870 } 4871 4872 /* 4873 * With in-kernel LAPIC, we only use this to inject EXTINT, so 4874 * fail for in-kernel 8259. 4875 */ 4876 if (pic_in_kernel(vcpu->kvm)) 4877 return -ENXIO; 4878 4879 if (vcpu->arch.pending_external_vector != -1) 4880 return -EEXIST; 4881 4882 vcpu->arch.pending_external_vector = irq->irq; 4883 kvm_make_request(KVM_REQ_EVENT, vcpu); 4884 return 0; 4885 } 4886 4887 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) 4888 { 4889 kvm_inject_nmi(vcpu); 4890 4891 return 0; 4892 } 4893 4894 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, 4895 struct kvm_tpr_access_ctl *tac) 4896 { 4897 if (tac->flags) 4898 return -EINVAL; 4899 vcpu->arch.tpr_access_reporting = !!tac->enabled; 4900 return 0; 4901 } 4902 4903 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, 4904 u64 mcg_cap) 4905 { 4906 int r; 4907 unsigned bank_num = mcg_cap & 0xff, bank; 4908 4909 r = -EINVAL; 4910 if (!bank_num || bank_num > KVM_MAX_MCE_BANKS) 4911 goto out; 4912 if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000)) 4913 goto out; 4914 r = 0; 4915 vcpu->arch.mcg_cap = mcg_cap; 4916 /* Init IA32_MCG_CTL to all 1s */ 4917 if (mcg_cap & MCG_CTL_P) 4918 vcpu->arch.mcg_ctl = ~(u64)0; 4919 /* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */ 4920 for (bank = 0; bank < bank_num; bank++) { 4921 vcpu->arch.mce_banks[bank*4] = ~(u64)0; 4922 if (mcg_cap & MCG_CMCI_P) 4923 vcpu->arch.mci_ctl2_banks[bank] = 0; 4924 } 4925 4926 kvm_apic_after_set_mcg_cap(vcpu); 4927 4928 static_call(kvm_x86_setup_mce)(vcpu); 4929 out: 4930 return r; 4931 } 4932 4933 /* 4934 * Validate this is an UCNA (uncorrectable no action) error by checking the 4935 * MCG_STATUS and MCi_STATUS registers: 4936 * - none of the bits for Machine Check Exceptions are set 4937 * - both the VAL (valid) and UC (uncorrectable) bits are set 4938 * MCI_STATUS_PCC - Processor Context Corrupted 4939 * MCI_STATUS_S - Signaled as a Machine Check Exception 4940 * MCI_STATUS_AR - Software recoverable Action Required 4941 */ 4942 static bool is_ucna(struct kvm_x86_mce *mce) 4943 { 4944 return !mce->mcg_status && 4945 !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) && 4946 (mce->status & MCI_STATUS_VAL) && 4947 (mce->status & MCI_STATUS_UC); 4948 } 4949 4950 static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks) 4951 { 4952 u64 mcg_cap = vcpu->arch.mcg_cap; 4953 4954 banks[1] = mce->status; 4955 banks[2] = mce->addr; 4956 banks[3] = mce->misc; 4957 vcpu->arch.mcg_status = mce->mcg_status; 4958 4959 if (!(mcg_cap & MCG_CMCI_P) || 4960 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN)) 4961 return 0; 4962 4963 if (lapic_in_kernel(vcpu)) 4964 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI); 4965 4966 return 0; 4967 } 4968 4969 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, 4970 struct kvm_x86_mce *mce) 4971 { 4972 u64 mcg_cap = vcpu->arch.mcg_cap; 4973 unsigned bank_num = mcg_cap & 0xff; 4974 u64 *banks = vcpu->arch.mce_banks; 4975 4976 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) 4977 return -EINVAL; 4978 4979 banks += array_index_nospec(4 * mce->bank, 4 * bank_num); 4980 4981 if (is_ucna(mce)) 4982 return kvm_vcpu_x86_set_ucna(vcpu, mce, banks); 4983 4984 /* 4985 * if IA32_MCG_CTL is not all 1s, the uncorrected error 4986 * reporting is disabled 4987 */ 4988 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && 4989 vcpu->arch.mcg_ctl != ~(u64)0) 4990 return 0; 4991 /* 4992 * if IA32_MCi_CTL is not all 1s, the uncorrected error 4993 * reporting is disabled for the bank 4994 */ 4995 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) 4996 return 0; 4997 if (mce->status & MCI_STATUS_UC) { 4998 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || 4999 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { 5000 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5001 return 0; 5002 } 5003 if (banks[1] & MCI_STATUS_VAL) 5004 mce->status |= MCI_STATUS_OVER; 5005 banks[2] = mce->addr; 5006 banks[3] = mce->misc; 5007 vcpu->arch.mcg_status = mce->mcg_status; 5008 banks[1] = mce->status; 5009 kvm_queue_exception(vcpu, MC_VECTOR); 5010 } else if (!(banks[1] & MCI_STATUS_VAL) 5011 || !(banks[1] & MCI_STATUS_UC)) { 5012 if (banks[1] & MCI_STATUS_VAL) 5013 mce->status |= MCI_STATUS_OVER; 5014 banks[2] = mce->addr; 5015 banks[3] = mce->misc; 5016 banks[1] = mce->status; 5017 } else 5018 banks[1] |= MCI_STATUS_OVER; 5019 return 0; 5020 } 5021 5022 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, 5023 struct kvm_vcpu_events *events) 5024 { 5025 struct kvm_queued_exception *ex; 5026 5027 process_nmi(vcpu); 5028 5029 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 5030 process_smi(vcpu); 5031 5032 /* 5033 * KVM's ABI only allows for one exception to be migrated. Luckily, 5034 * the only time there can be two queued exceptions is if there's a 5035 * non-exiting _injected_ exception, and a pending exiting exception. 5036 * In that case, ignore the VM-Exiting exception as it's an extension 5037 * of the injected exception. 5038 */ 5039 if (vcpu->arch.exception_vmexit.pending && 5040 !vcpu->arch.exception.pending && 5041 !vcpu->arch.exception.injected) 5042 ex = &vcpu->arch.exception_vmexit; 5043 else 5044 ex = &vcpu->arch.exception; 5045 5046 /* 5047 * In guest mode, payload delivery should be deferred if the exception 5048 * will be intercepted by L1, e.g. KVM should not modifying CR2 if L1 5049 * intercepts #PF, ditto for DR6 and #DBs. If the per-VM capability, 5050 * KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not 5051 * propagate the payload and so it cannot be safely deferred. Deliver 5052 * the payload if the capability hasn't been requested. 5053 */ 5054 if (!vcpu->kvm->arch.exception_payload_enabled && 5055 ex->pending && ex->has_payload) 5056 kvm_deliver_exception_payload(vcpu, ex); 5057 5058 /* 5059 * The API doesn't provide the instruction length for software 5060 * exceptions, so don't report them. As long as the guest RIP 5061 * isn't advanced, we should expect to encounter the exception 5062 * again. 5063 */ 5064 if (kvm_exception_is_soft(ex->vector)) { 5065 events->exception.injected = 0; 5066 events->exception.pending = 0; 5067 } else { 5068 events->exception.injected = ex->injected; 5069 events->exception.pending = ex->pending; 5070 /* 5071 * For ABI compatibility, deliberately conflate 5072 * pending and injected exceptions when 5073 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled. 5074 */ 5075 if (!vcpu->kvm->arch.exception_payload_enabled) 5076 events->exception.injected |= ex->pending; 5077 } 5078 events->exception.nr = ex->vector; 5079 events->exception.has_error_code = ex->has_error_code; 5080 events->exception.error_code = ex->error_code; 5081 events->exception_has_payload = ex->has_payload; 5082 events->exception_payload = ex->payload; 5083 5084 events->interrupt.injected = 5085 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; 5086 events->interrupt.nr = vcpu->arch.interrupt.nr; 5087 events->interrupt.soft = 0; 5088 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 5089 5090 events->nmi.injected = vcpu->arch.nmi_injected; 5091 events->nmi.pending = vcpu->arch.nmi_pending != 0; 5092 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); 5093 events->nmi.pad = 0; 5094 5095 events->sipi_vector = 0; /* never valid when reporting to user space */ 5096 5097 events->smi.smm = is_smm(vcpu); 5098 events->smi.pending = vcpu->arch.smi_pending; 5099 events->smi.smm_inside_nmi = 5100 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); 5101 events->smi.latched_init = kvm_lapic_latched_init(vcpu); 5102 5103 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 5104 | KVM_VCPUEVENT_VALID_SHADOW 5105 | KVM_VCPUEVENT_VALID_SMM); 5106 if (vcpu->kvm->arch.exception_payload_enabled) 5107 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; 5108 if (vcpu->kvm->arch.triple_fault_event) { 5109 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5110 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; 5111 } 5112 5113 memset(&events->reserved, 0, sizeof(events->reserved)); 5114 } 5115 5116 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 5117 struct kvm_vcpu_events *events) 5118 { 5119 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING 5120 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 5121 | KVM_VCPUEVENT_VALID_SHADOW 5122 | KVM_VCPUEVENT_VALID_SMM 5123 | KVM_VCPUEVENT_VALID_PAYLOAD 5124 | KVM_VCPUEVENT_VALID_TRIPLE_FAULT)) 5125 return -EINVAL; 5126 5127 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { 5128 if (!vcpu->kvm->arch.exception_payload_enabled) 5129 return -EINVAL; 5130 if (events->exception.pending) 5131 events->exception.injected = 0; 5132 else 5133 events->exception_has_payload = 0; 5134 } else { 5135 events->exception.pending = 0; 5136 events->exception_has_payload = 0; 5137 } 5138 5139 if ((events->exception.injected || events->exception.pending) && 5140 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) 5141 return -EINVAL; 5142 5143 /* INITs are latched while in SMM */ 5144 if (events->flags & KVM_VCPUEVENT_VALID_SMM && 5145 (events->smi.smm || events->smi.pending) && 5146 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 5147 return -EINVAL; 5148 5149 process_nmi(vcpu); 5150 5151 /* 5152 * Flag that userspace is stuffing an exception, the next KVM_RUN will 5153 * morph the exception to a VM-Exit if appropriate. Do this only for 5154 * pending exceptions, already-injected exceptions are not subject to 5155 * intercpetion. Note, userspace that conflates pending and injected 5156 * is hosed, and will incorrectly convert an injected exception into a 5157 * pending exception, which in turn may cause a spurious VM-Exit. 5158 */ 5159 vcpu->arch.exception_from_userspace = events->exception.pending; 5160 5161 vcpu->arch.exception_vmexit.pending = false; 5162 5163 vcpu->arch.exception.injected = events->exception.injected; 5164 vcpu->arch.exception.pending = events->exception.pending; 5165 vcpu->arch.exception.vector = events->exception.nr; 5166 vcpu->arch.exception.has_error_code = events->exception.has_error_code; 5167 vcpu->arch.exception.error_code = events->exception.error_code; 5168 vcpu->arch.exception.has_payload = events->exception_has_payload; 5169 vcpu->arch.exception.payload = events->exception_payload; 5170 5171 vcpu->arch.interrupt.injected = events->interrupt.injected; 5172 vcpu->arch.interrupt.nr = events->interrupt.nr; 5173 vcpu->arch.interrupt.soft = events->interrupt.soft; 5174 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) 5175 static_call(kvm_x86_set_interrupt_shadow)(vcpu, 5176 events->interrupt.shadow); 5177 5178 vcpu->arch.nmi_injected = events->nmi.injected; 5179 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) 5180 vcpu->arch.nmi_pending = events->nmi.pending; 5181 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); 5182 5183 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && 5184 lapic_in_kernel(vcpu)) 5185 vcpu->arch.apic->sipi_vector = events->sipi_vector; 5186 5187 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { 5188 #ifdef CONFIG_KVM_SMM 5189 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { 5190 kvm_x86_ops.nested_ops->leave_nested(vcpu); 5191 kvm_smm_changed(vcpu, events->smi.smm); 5192 } 5193 5194 vcpu->arch.smi_pending = events->smi.pending; 5195 5196 if (events->smi.smm) { 5197 if (events->smi.smm_inside_nmi) 5198 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 5199 else 5200 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; 5201 } 5202 5203 #else 5204 if (events->smi.smm || events->smi.pending || 5205 events->smi.smm_inside_nmi) 5206 return -EINVAL; 5207 #endif 5208 5209 if (lapic_in_kernel(vcpu)) { 5210 if (events->smi.latched_init) 5211 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5212 else 5213 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5214 } 5215 } 5216 5217 if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) { 5218 if (!vcpu->kvm->arch.triple_fault_event) 5219 return -EINVAL; 5220 if (events->triple_fault.pending) 5221 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5222 else 5223 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5224 } 5225 5226 kvm_make_request(KVM_REQ_EVENT, vcpu); 5227 5228 return 0; 5229 } 5230 5231 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, 5232 struct kvm_debugregs *dbgregs) 5233 { 5234 unsigned long val; 5235 5236 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); 5237 kvm_get_dr(vcpu, 6, &val); 5238 dbgregs->dr6 = val; 5239 dbgregs->dr7 = vcpu->arch.dr7; 5240 dbgregs->flags = 0; 5241 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); 5242 } 5243 5244 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 5245 struct kvm_debugregs *dbgregs) 5246 { 5247 if (dbgregs->flags) 5248 return -EINVAL; 5249 5250 if (!kvm_dr6_valid(dbgregs->dr6)) 5251 return -EINVAL; 5252 if (!kvm_dr7_valid(dbgregs->dr7)) 5253 return -EINVAL; 5254 5255 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); 5256 kvm_update_dr0123(vcpu); 5257 vcpu->arch.dr6 = dbgregs->dr6; 5258 vcpu->arch.dr7 = dbgregs->dr7; 5259 kvm_update_dr7(vcpu); 5260 5261 return 0; 5262 } 5263 5264 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, 5265 struct kvm_xsave *guest_xsave) 5266 { 5267 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5268 return; 5269 5270 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5271 guest_xsave->region, 5272 sizeof(guest_xsave->region), 5273 vcpu->arch.pkru); 5274 } 5275 5276 static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu, 5277 u8 *state, unsigned int size) 5278 { 5279 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5280 return; 5281 5282 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5283 state, size, vcpu->arch.pkru); 5284 } 5285 5286 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 5287 struct kvm_xsave *guest_xsave) 5288 { 5289 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5290 return 0; 5291 5292 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, 5293 guest_xsave->region, 5294 kvm_caps.supported_xcr0, 5295 &vcpu->arch.pkru); 5296 } 5297 5298 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, 5299 struct kvm_xcrs *guest_xcrs) 5300 { 5301 if (!boot_cpu_has(X86_FEATURE_XSAVE)) { 5302 guest_xcrs->nr_xcrs = 0; 5303 return; 5304 } 5305 5306 guest_xcrs->nr_xcrs = 1; 5307 guest_xcrs->flags = 0; 5308 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; 5309 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; 5310 } 5311 5312 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, 5313 struct kvm_xcrs *guest_xcrs) 5314 { 5315 int i, r = 0; 5316 5317 if (!boot_cpu_has(X86_FEATURE_XSAVE)) 5318 return -EINVAL; 5319 5320 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) 5321 return -EINVAL; 5322 5323 for (i = 0; i < guest_xcrs->nr_xcrs; i++) 5324 /* Only support XCR0 currently */ 5325 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { 5326 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, 5327 guest_xcrs->xcrs[i].value); 5328 break; 5329 } 5330 if (r) 5331 r = -EINVAL; 5332 return r; 5333 } 5334 5335 /* 5336 * kvm_set_guest_paused() indicates to the guest kernel that it has been 5337 * stopped by the hypervisor. This function will be called from the host only. 5338 * EINVAL is returned when the host attempts to set the flag for a guest that 5339 * does not support pv clocks. 5340 */ 5341 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 5342 { 5343 if (!vcpu->arch.pv_time.active) 5344 return -EINVAL; 5345 vcpu->arch.pvclock_set_guest_stopped_request = true; 5346 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 5347 return 0; 5348 } 5349 5350 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu, 5351 struct kvm_device_attr *attr) 5352 { 5353 int r; 5354 5355 switch (attr->attr) { 5356 case KVM_VCPU_TSC_OFFSET: 5357 r = 0; 5358 break; 5359 default: 5360 r = -ENXIO; 5361 } 5362 5363 return r; 5364 } 5365 5366 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu, 5367 struct kvm_device_attr *attr) 5368 { 5369 u64 __user *uaddr = kvm_get_attr_addr(attr); 5370 int r; 5371 5372 if (IS_ERR(uaddr)) 5373 return PTR_ERR(uaddr); 5374 5375 switch (attr->attr) { 5376 case KVM_VCPU_TSC_OFFSET: 5377 r = -EFAULT; 5378 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) 5379 break; 5380 r = 0; 5381 break; 5382 default: 5383 r = -ENXIO; 5384 } 5385 5386 return r; 5387 } 5388 5389 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, 5390 struct kvm_device_attr *attr) 5391 { 5392 u64 __user *uaddr = kvm_get_attr_addr(attr); 5393 struct kvm *kvm = vcpu->kvm; 5394 int r; 5395 5396 if (IS_ERR(uaddr)) 5397 return PTR_ERR(uaddr); 5398 5399 switch (attr->attr) { 5400 case KVM_VCPU_TSC_OFFSET: { 5401 u64 offset, tsc, ns; 5402 unsigned long flags; 5403 bool matched; 5404 5405 r = -EFAULT; 5406 if (get_user(offset, uaddr)) 5407 break; 5408 5409 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 5410 5411 matched = (vcpu->arch.virtual_tsc_khz && 5412 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && 5413 kvm->arch.last_tsc_offset == offset); 5414 5415 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; 5416 ns = get_kvmclock_base_ns(); 5417 5418 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched); 5419 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 5420 5421 r = 0; 5422 break; 5423 } 5424 default: 5425 r = -ENXIO; 5426 } 5427 5428 return r; 5429 } 5430 5431 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu, 5432 unsigned int ioctl, 5433 void __user *argp) 5434 { 5435 struct kvm_device_attr attr; 5436 int r; 5437 5438 if (copy_from_user(&attr, argp, sizeof(attr))) 5439 return -EFAULT; 5440 5441 if (attr.group != KVM_VCPU_TSC_CTRL) 5442 return -ENXIO; 5443 5444 switch (ioctl) { 5445 case KVM_HAS_DEVICE_ATTR: 5446 r = kvm_arch_tsc_has_attr(vcpu, &attr); 5447 break; 5448 case KVM_GET_DEVICE_ATTR: 5449 r = kvm_arch_tsc_get_attr(vcpu, &attr); 5450 break; 5451 case KVM_SET_DEVICE_ATTR: 5452 r = kvm_arch_tsc_set_attr(vcpu, &attr); 5453 break; 5454 } 5455 5456 return r; 5457 } 5458 5459 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 5460 struct kvm_enable_cap *cap) 5461 { 5462 int r; 5463 uint16_t vmcs_version; 5464 void __user *user_ptr; 5465 5466 if (cap->flags) 5467 return -EINVAL; 5468 5469 switch (cap->cap) { 5470 case KVM_CAP_HYPERV_SYNIC2: 5471 if (cap->args[0]) 5472 return -EINVAL; 5473 fallthrough; 5474 5475 case KVM_CAP_HYPERV_SYNIC: 5476 if (!irqchip_in_kernel(vcpu->kvm)) 5477 return -EINVAL; 5478 return kvm_hv_activate_synic(vcpu, cap->cap == 5479 KVM_CAP_HYPERV_SYNIC2); 5480 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 5481 if (!kvm_x86_ops.nested_ops->enable_evmcs) 5482 return -ENOTTY; 5483 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); 5484 if (!r) { 5485 user_ptr = (void __user *)(uintptr_t)cap->args[0]; 5486 if (copy_to_user(user_ptr, &vmcs_version, 5487 sizeof(vmcs_version))) 5488 r = -EFAULT; 5489 } 5490 return r; 5491 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 5492 if (!kvm_x86_ops.enable_direct_tlbflush) 5493 return -ENOTTY; 5494 5495 return static_call(kvm_x86_enable_direct_tlbflush)(vcpu); 5496 5497 case KVM_CAP_HYPERV_ENFORCE_CPUID: 5498 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); 5499 5500 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 5501 vcpu->arch.pv_cpuid.enforce = cap->args[0]; 5502 if (vcpu->arch.pv_cpuid.enforce) 5503 kvm_update_pv_runtime(vcpu); 5504 5505 return 0; 5506 default: 5507 return -EINVAL; 5508 } 5509 } 5510 5511 long kvm_arch_vcpu_ioctl(struct file *filp, 5512 unsigned int ioctl, unsigned long arg) 5513 { 5514 struct kvm_vcpu *vcpu = filp->private_data; 5515 void __user *argp = (void __user *)arg; 5516 int r; 5517 union { 5518 struct kvm_sregs2 *sregs2; 5519 struct kvm_lapic_state *lapic; 5520 struct kvm_xsave *xsave; 5521 struct kvm_xcrs *xcrs; 5522 void *buffer; 5523 } u; 5524 5525 vcpu_load(vcpu); 5526 5527 u.buffer = NULL; 5528 switch (ioctl) { 5529 case KVM_GET_LAPIC: { 5530 r = -EINVAL; 5531 if (!lapic_in_kernel(vcpu)) 5532 goto out; 5533 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), 5534 GFP_KERNEL_ACCOUNT); 5535 5536 r = -ENOMEM; 5537 if (!u.lapic) 5538 goto out; 5539 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); 5540 if (r) 5541 goto out; 5542 r = -EFAULT; 5543 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) 5544 goto out; 5545 r = 0; 5546 break; 5547 } 5548 case KVM_SET_LAPIC: { 5549 r = -EINVAL; 5550 if (!lapic_in_kernel(vcpu)) 5551 goto out; 5552 u.lapic = memdup_user(argp, sizeof(*u.lapic)); 5553 if (IS_ERR(u.lapic)) { 5554 r = PTR_ERR(u.lapic); 5555 goto out_nofree; 5556 } 5557 5558 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); 5559 break; 5560 } 5561 case KVM_INTERRUPT: { 5562 struct kvm_interrupt irq; 5563 5564 r = -EFAULT; 5565 if (copy_from_user(&irq, argp, sizeof(irq))) 5566 goto out; 5567 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 5568 break; 5569 } 5570 case KVM_NMI: { 5571 r = kvm_vcpu_ioctl_nmi(vcpu); 5572 break; 5573 } 5574 case KVM_SMI: { 5575 r = kvm_inject_smi(vcpu); 5576 break; 5577 } 5578 case KVM_SET_CPUID: { 5579 struct kvm_cpuid __user *cpuid_arg = argp; 5580 struct kvm_cpuid cpuid; 5581 5582 r = -EFAULT; 5583 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5584 goto out; 5585 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); 5586 break; 5587 } 5588 case KVM_SET_CPUID2: { 5589 struct kvm_cpuid2 __user *cpuid_arg = argp; 5590 struct kvm_cpuid2 cpuid; 5591 5592 r = -EFAULT; 5593 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5594 goto out; 5595 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 5596 cpuid_arg->entries); 5597 break; 5598 } 5599 case KVM_GET_CPUID2: { 5600 struct kvm_cpuid2 __user *cpuid_arg = argp; 5601 struct kvm_cpuid2 cpuid; 5602 5603 r = -EFAULT; 5604 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5605 goto out; 5606 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 5607 cpuid_arg->entries); 5608 if (r) 5609 goto out; 5610 r = -EFAULT; 5611 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 5612 goto out; 5613 r = 0; 5614 break; 5615 } 5616 case KVM_GET_MSRS: { 5617 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5618 r = msr_io(vcpu, argp, do_get_msr, 1); 5619 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5620 break; 5621 } 5622 case KVM_SET_MSRS: { 5623 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5624 r = msr_io(vcpu, argp, do_set_msr, 0); 5625 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5626 break; 5627 } 5628 case KVM_TPR_ACCESS_REPORTING: { 5629 struct kvm_tpr_access_ctl tac; 5630 5631 r = -EFAULT; 5632 if (copy_from_user(&tac, argp, sizeof(tac))) 5633 goto out; 5634 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 5635 if (r) 5636 goto out; 5637 r = -EFAULT; 5638 if (copy_to_user(argp, &tac, sizeof(tac))) 5639 goto out; 5640 r = 0; 5641 break; 5642 }; 5643 case KVM_SET_VAPIC_ADDR: { 5644 struct kvm_vapic_addr va; 5645 int idx; 5646 5647 r = -EINVAL; 5648 if (!lapic_in_kernel(vcpu)) 5649 goto out; 5650 r = -EFAULT; 5651 if (copy_from_user(&va, argp, sizeof(va))) 5652 goto out; 5653 idx = srcu_read_lock(&vcpu->kvm->srcu); 5654 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 5655 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5656 break; 5657 } 5658 case KVM_X86_SETUP_MCE: { 5659 u64 mcg_cap; 5660 5661 r = -EFAULT; 5662 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap))) 5663 goto out; 5664 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 5665 break; 5666 } 5667 case KVM_X86_SET_MCE: { 5668 struct kvm_x86_mce mce; 5669 5670 r = -EFAULT; 5671 if (copy_from_user(&mce, argp, sizeof(mce))) 5672 goto out; 5673 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 5674 break; 5675 } 5676 case KVM_GET_VCPU_EVENTS: { 5677 struct kvm_vcpu_events events; 5678 5679 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); 5680 5681 r = -EFAULT; 5682 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) 5683 break; 5684 r = 0; 5685 break; 5686 } 5687 case KVM_SET_VCPU_EVENTS: { 5688 struct kvm_vcpu_events events; 5689 5690 r = -EFAULT; 5691 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) 5692 break; 5693 5694 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); 5695 break; 5696 } 5697 case KVM_GET_DEBUGREGS: { 5698 struct kvm_debugregs dbgregs; 5699 5700 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); 5701 5702 r = -EFAULT; 5703 if (copy_to_user(argp, &dbgregs, 5704 sizeof(struct kvm_debugregs))) 5705 break; 5706 r = 0; 5707 break; 5708 } 5709 case KVM_SET_DEBUGREGS: { 5710 struct kvm_debugregs dbgregs; 5711 5712 r = -EFAULT; 5713 if (copy_from_user(&dbgregs, argp, 5714 sizeof(struct kvm_debugregs))) 5715 break; 5716 5717 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); 5718 break; 5719 } 5720 case KVM_GET_XSAVE: { 5721 r = -EINVAL; 5722 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) 5723 break; 5724 5725 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT); 5726 r = -ENOMEM; 5727 if (!u.xsave) 5728 break; 5729 5730 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); 5731 5732 r = -EFAULT; 5733 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) 5734 break; 5735 r = 0; 5736 break; 5737 } 5738 case KVM_SET_XSAVE: { 5739 int size = vcpu->arch.guest_fpu.uabi_size; 5740 5741 u.xsave = memdup_user(argp, size); 5742 if (IS_ERR(u.xsave)) { 5743 r = PTR_ERR(u.xsave); 5744 goto out_nofree; 5745 } 5746 5747 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); 5748 break; 5749 } 5750 5751 case KVM_GET_XSAVE2: { 5752 int size = vcpu->arch.guest_fpu.uabi_size; 5753 5754 u.xsave = kzalloc(size, GFP_KERNEL_ACCOUNT); 5755 r = -ENOMEM; 5756 if (!u.xsave) 5757 break; 5758 5759 kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size); 5760 5761 r = -EFAULT; 5762 if (copy_to_user(argp, u.xsave, size)) 5763 break; 5764 5765 r = 0; 5766 break; 5767 } 5768 5769 case KVM_GET_XCRS: { 5770 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT); 5771 r = -ENOMEM; 5772 if (!u.xcrs) 5773 break; 5774 5775 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); 5776 5777 r = -EFAULT; 5778 if (copy_to_user(argp, u.xcrs, 5779 sizeof(struct kvm_xcrs))) 5780 break; 5781 r = 0; 5782 break; 5783 } 5784 case KVM_SET_XCRS: { 5785 u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); 5786 if (IS_ERR(u.xcrs)) { 5787 r = PTR_ERR(u.xcrs); 5788 goto out_nofree; 5789 } 5790 5791 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); 5792 break; 5793 } 5794 case KVM_SET_TSC_KHZ: { 5795 u32 user_tsc_khz; 5796 5797 r = -EINVAL; 5798 user_tsc_khz = (u32)arg; 5799 5800 if (kvm_caps.has_tsc_control && 5801 user_tsc_khz >= kvm_caps.max_guest_tsc_khz) 5802 goto out; 5803 5804 if (user_tsc_khz == 0) 5805 user_tsc_khz = tsc_khz; 5806 5807 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) 5808 r = 0; 5809 5810 goto out; 5811 } 5812 case KVM_GET_TSC_KHZ: { 5813 r = vcpu->arch.virtual_tsc_khz; 5814 goto out; 5815 } 5816 case KVM_KVMCLOCK_CTRL: { 5817 r = kvm_set_guest_paused(vcpu); 5818 goto out; 5819 } 5820 case KVM_ENABLE_CAP: { 5821 struct kvm_enable_cap cap; 5822 5823 r = -EFAULT; 5824 if (copy_from_user(&cap, argp, sizeof(cap))) 5825 goto out; 5826 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 5827 break; 5828 } 5829 case KVM_GET_NESTED_STATE: { 5830 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5831 u32 user_data_size; 5832 5833 r = -EINVAL; 5834 if (!kvm_x86_ops.nested_ops->get_state) 5835 break; 5836 5837 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); 5838 r = -EFAULT; 5839 if (get_user(user_data_size, &user_kvm_nested_state->size)) 5840 break; 5841 5842 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, 5843 user_data_size); 5844 if (r < 0) 5845 break; 5846 5847 if (r > user_data_size) { 5848 if (put_user(r, &user_kvm_nested_state->size)) 5849 r = -EFAULT; 5850 else 5851 r = -E2BIG; 5852 break; 5853 } 5854 5855 r = 0; 5856 break; 5857 } 5858 case KVM_SET_NESTED_STATE: { 5859 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5860 struct kvm_nested_state kvm_state; 5861 int idx; 5862 5863 r = -EINVAL; 5864 if (!kvm_x86_ops.nested_ops->set_state) 5865 break; 5866 5867 r = -EFAULT; 5868 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) 5869 break; 5870 5871 r = -EINVAL; 5872 if (kvm_state.size < sizeof(kvm_state)) 5873 break; 5874 5875 if (kvm_state.flags & 5876 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE 5877 | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING 5878 | KVM_STATE_NESTED_GIF_SET)) 5879 break; 5880 5881 /* nested_run_pending implies guest_mode. */ 5882 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING) 5883 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) 5884 break; 5885 5886 idx = srcu_read_lock(&vcpu->kvm->srcu); 5887 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); 5888 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5889 break; 5890 } 5891 case KVM_GET_SUPPORTED_HV_CPUID: 5892 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); 5893 break; 5894 #ifdef CONFIG_KVM_XEN 5895 case KVM_XEN_VCPU_GET_ATTR: { 5896 struct kvm_xen_vcpu_attr xva; 5897 5898 r = -EFAULT; 5899 if (copy_from_user(&xva, argp, sizeof(xva))) 5900 goto out; 5901 r = kvm_xen_vcpu_get_attr(vcpu, &xva); 5902 if (!r && copy_to_user(argp, &xva, sizeof(xva))) 5903 r = -EFAULT; 5904 break; 5905 } 5906 case KVM_XEN_VCPU_SET_ATTR: { 5907 struct kvm_xen_vcpu_attr xva; 5908 5909 r = -EFAULT; 5910 if (copy_from_user(&xva, argp, sizeof(xva))) 5911 goto out; 5912 r = kvm_xen_vcpu_set_attr(vcpu, &xva); 5913 break; 5914 } 5915 #endif 5916 case KVM_GET_SREGS2: { 5917 u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL); 5918 r = -ENOMEM; 5919 if (!u.sregs2) 5920 goto out; 5921 __get_sregs2(vcpu, u.sregs2); 5922 r = -EFAULT; 5923 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2))) 5924 goto out; 5925 r = 0; 5926 break; 5927 } 5928 case KVM_SET_SREGS2: { 5929 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2)); 5930 if (IS_ERR(u.sregs2)) { 5931 r = PTR_ERR(u.sregs2); 5932 u.sregs2 = NULL; 5933 goto out; 5934 } 5935 r = __set_sregs2(vcpu, u.sregs2); 5936 break; 5937 } 5938 case KVM_HAS_DEVICE_ATTR: 5939 case KVM_GET_DEVICE_ATTR: 5940 case KVM_SET_DEVICE_ATTR: 5941 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp); 5942 break; 5943 default: 5944 r = -EINVAL; 5945 } 5946 out: 5947 kfree(u.buffer); 5948 out_nofree: 5949 vcpu_put(vcpu); 5950 return r; 5951 } 5952 5953 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 5954 { 5955 return VM_FAULT_SIGBUS; 5956 } 5957 5958 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) 5959 { 5960 int ret; 5961 5962 if (addr > (unsigned int)(-3 * PAGE_SIZE)) 5963 return -EINVAL; 5964 ret = static_call(kvm_x86_set_tss_addr)(kvm, addr); 5965 return ret; 5966 } 5967 5968 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, 5969 u64 ident_addr) 5970 { 5971 return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr); 5972 } 5973 5974 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, 5975 unsigned long kvm_nr_mmu_pages) 5976 { 5977 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) 5978 return -EINVAL; 5979 5980 mutex_lock(&kvm->slots_lock); 5981 5982 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); 5983 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; 5984 5985 mutex_unlock(&kvm->slots_lock); 5986 return 0; 5987 } 5988 5989 static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) 5990 { 5991 return kvm->arch.n_max_mmu_pages; 5992 } 5993 5994 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 5995 { 5996 struct kvm_pic *pic = kvm->arch.vpic; 5997 int r; 5998 5999 r = 0; 6000 switch (chip->chip_id) { 6001 case KVM_IRQCHIP_PIC_MASTER: 6002 memcpy(&chip->chip.pic, &pic->pics[0], 6003 sizeof(struct kvm_pic_state)); 6004 break; 6005 case KVM_IRQCHIP_PIC_SLAVE: 6006 memcpy(&chip->chip.pic, &pic->pics[1], 6007 sizeof(struct kvm_pic_state)); 6008 break; 6009 case KVM_IRQCHIP_IOAPIC: 6010 kvm_get_ioapic(kvm, &chip->chip.ioapic); 6011 break; 6012 default: 6013 r = -EINVAL; 6014 break; 6015 } 6016 return r; 6017 } 6018 6019 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 6020 { 6021 struct kvm_pic *pic = kvm->arch.vpic; 6022 int r; 6023 6024 r = 0; 6025 switch (chip->chip_id) { 6026 case KVM_IRQCHIP_PIC_MASTER: 6027 spin_lock(&pic->lock); 6028 memcpy(&pic->pics[0], &chip->chip.pic, 6029 sizeof(struct kvm_pic_state)); 6030 spin_unlock(&pic->lock); 6031 break; 6032 case KVM_IRQCHIP_PIC_SLAVE: 6033 spin_lock(&pic->lock); 6034 memcpy(&pic->pics[1], &chip->chip.pic, 6035 sizeof(struct kvm_pic_state)); 6036 spin_unlock(&pic->lock); 6037 break; 6038 case KVM_IRQCHIP_IOAPIC: 6039 kvm_set_ioapic(kvm, &chip->chip.ioapic); 6040 break; 6041 default: 6042 r = -EINVAL; 6043 break; 6044 } 6045 kvm_pic_update_irq(pic); 6046 return r; 6047 } 6048 6049 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) 6050 { 6051 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; 6052 6053 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); 6054 6055 mutex_lock(&kps->lock); 6056 memcpy(ps, &kps->channels, sizeof(*ps)); 6057 mutex_unlock(&kps->lock); 6058 return 0; 6059 } 6060 6061 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 6062 { 6063 int i; 6064 struct kvm_pit *pit = kvm->arch.vpit; 6065 6066 mutex_lock(&pit->pit_state.lock); 6067 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); 6068 for (i = 0; i < 3; i++) 6069 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); 6070 mutex_unlock(&pit->pit_state.lock); 6071 return 0; 6072 } 6073 6074 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 6075 { 6076 mutex_lock(&kvm->arch.vpit->pit_state.lock); 6077 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, 6078 sizeof(ps->channels)); 6079 ps->flags = kvm->arch.vpit->pit_state.flags; 6080 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 6081 memset(&ps->reserved, 0, sizeof(ps->reserved)); 6082 return 0; 6083 } 6084 6085 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 6086 { 6087 int start = 0; 6088 int i; 6089 u32 prev_legacy, cur_legacy; 6090 struct kvm_pit *pit = kvm->arch.vpit; 6091 6092 mutex_lock(&pit->pit_state.lock); 6093 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 6094 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; 6095 if (!prev_legacy && cur_legacy) 6096 start = 1; 6097 memcpy(&pit->pit_state.channels, &ps->channels, 6098 sizeof(pit->pit_state.channels)); 6099 pit->pit_state.flags = ps->flags; 6100 for (i = 0; i < 3; i++) 6101 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, 6102 start && i == 0); 6103 mutex_unlock(&pit->pit_state.lock); 6104 return 0; 6105 } 6106 6107 static int kvm_vm_ioctl_reinject(struct kvm *kvm, 6108 struct kvm_reinject_control *control) 6109 { 6110 struct kvm_pit *pit = kvm->arch.vpit; 6111 6112 /* pit->pit_state.lock was overloaded to prevent userspace from getting 6113 * an inconsistent state after running multiple KVM_REINJECT_CONTROL 6114 * ioctls in parallel. Use a separate lock if that ioctl isn't rare. 6115 */ 6116 mutex_lock(&pit->pit_state.lock); 6117 kvm_pit_set_reinject(pit, control->pit_reinject); 6118 mutex_unlock(&pit->pit_state.lock); 6119 6120 return 0; 6121 } 6122 6123 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) 6124 { 6125 6126 /* 6127 * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called 6128 * before reporting dirty_bitmap to userspace. KVM flushes the buffers 6129 * on all VM-Exits, thus we only need to kick running vCPUs to force a 6130 * VM-Exit. 6131 */ 6132 struct kvm_vcpu *vcpu; 6133 unsigned long i; 6134 6135 kvm_for_each_vcpu(i, vcpu, kvm) 6136 kvm_vcpu_kick(vcpu); 6137 } 6138 6139 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, 6140 bool line_status) 6141 { 6142 if (!irqchip_in_kernel(kvm)) 6143 return -ENXIO; 6144 6145 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 6146 irq_event->irq, irq_event->level, 6147 line_status); 6148 return 0; 6149 } 6150 6151 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, 6152 struct kvm_enable_cap *cap) 6153 { 6154 int r; 6155 6156 if (cap->flags) 6157 return -EINVAL; 6158 6159 switch (cap->cap) { 6160 case KVM_CAP_DISABLE_QUIRKS2: 6161 r = -EINVAL; 6162 if (cap->args[0] & ~KVM_X86_VALID_QUIRKS) 6163 break; 6164 fallthrough; 6165 case KVM_CAP_DISABLE_QUIRKS: 6166 kvm->arch.disabled_quirks = cap->args[0]; 6167 r = 0; 6168 break; 6169 case KVM_CAP_SPLIT_IRQCHIP: { 6170 mutex_lock(&kvm->lock); 6171 r = -EINVAL; 6172 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) 6173 goto split_irqchip_unlock; 6174 r = -EEXIST; 6175 if (irqchip_in_kernel(kvm)) 6176 goto split_irqchip_unlock; 6177 if (kvm->created_vcpus) 6178 goto split_irqchip_unlock; 6179 r = kvm_setup_empty_irq_routing(kvm); 6180 if (r) 6181 goto split_irqchip_unlock; 6182 /* Pairs with irqchip_in_kernel. */ 6183 smp_wmb(); 6184 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; 6185 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; 6186 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6187 r = 0; 6188 split_irqchip_unlock: 6189 mutex_unlock(&kvm->lock); 6190 break; 6191 } 6192 case KVM_CAP_X2APIC_API: 6193 r = -EINVAL; 6194 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) 6195 break; 6196 6197 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) 6198 kvm->arch.x2apic_format = true; 6199 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) 6200 kvm->arch.x2apic_broadcast_quirk_disabled = true; 6201 6202 r = 0; 6203 break; 6204 case KVM_CAP_X86_DISABLE_EXITS: 6205 r = -EINVAL; 6206 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) 6207 break; 6208 6209 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && 6210 kvm_can_mwait_in_guest()) 6211 kvm->arch.mwait_in_guest = true; 6212 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) 6213 kvm->arch.hlt_in_guest = true; 6214 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) 6215 kvm->arch.pause_in_guest = true; 6216 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) 6217 kvm->arch.cstate_in_guest = true; 6218 r = 0; 6219 break; 6220 case KVM_CAP_MSR_PLATFORM_INFO: 6221 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; 6222 r = 0; 6223 break; 6224 case KVM_CAP_EXCEPTION_PAYLOAD: 6225 kvm->arch.exception_payload_enabled = cap->args[0]; 6226 r = 0; 6227 break; 6228 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 6229 kvm->arch.triple_fault_event = cap->args[0]; 6230 r = 0; 6231 break; 6232 case KVM_CAP_X86_USER_SPACE_MSR: 6233 r = -EINVAL; 6234 if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL | 6235 KVM_MSR_EXIT_REASON_UNKNOWN | 6236 KVM_MSR_EXIT_REASON_FILTER)) 6237 break; 6238 kvm->arch.user_space_msr_mask = cap->args[0]; 6239 r = 0; 6240 break; 6241 case KVM_CAP_X86_BUS_LOCK_EXIT: 6242 r = -EINVAL; 6243 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) 6244 break; 6245 6246 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && 6247 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) 6248 break; 6249 6250 if (kvm_caps.has_bus_lock_exit && 6251 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) 6252 kvm->arch.bus_lock_detection_enabled = true; 6253 r = 0; 6254 break; 6255 #ifdef CONFIG_X86_SGX_KVM 6256 case KVM_CAP_SGX_ATTRIBUTE: { 6257 unsigned long allowed_attributes = 0; 6258 6259 r = sgx_set_attribute(&allowed_attributes, cap->args[0]); 6260 if (r) 6261 break; 6262 6263 /* KVM only supports the PROVISIONKEY privileged attribute. */ 6264 if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) && 6265 !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY)) 6266 kvm->arch.sgx_provisioning_allowed = true; 6267 else 6268 r = -EINVAL; 6269 break; 6270 } 6271 #endif 6272 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 6273 r = -EINVAL; 6274 if (!kvm_x86_ops.vm_copy_enc_context_from) 6275 break; 6276 6277 r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]); 6278 break; 6279 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 6280 r = -EINVAL; 6281 if (!kvm_x86_ops.vm_move_enc_context_from) 6282 break; 6283 6284 r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]); 6285 break; 6286 case KVM_CAP_EXIT_HYPERCALL: 6287 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) { 6288 r = -EINVAL; 6289 break; 6290 } 6291 kvm->arch.hypercall_exit_enabled = cap->args[0]; 6292 r = 0; 6293 break; 6294 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 6295 r = -EINVAL; 6296 if (cap->args[0] & ~1) 6297 break; 6298 kvm->arch.exit_on_emulation_error = cap->args[0]; 6299 r = 0; 6300 break; 6301 case KVM_CAP_PMU_CAPABILITY: 6302 r = -EINVAL; 6303 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK)) 6304 break; 6305 6306 mutex_lock(&kvm->lock); 6307 if (!kvm->created_vcpus) { 6308 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE); 6309 r = 0; 6310 } 6311 mutex_unlock(&kvm->lock); 6312 break; 6313 case KVM_CAP_MAX_VCPU_ID: 6314 r = -EINVAL; 6315 if (cap->args[0] > KVM_MAX_VCPU_IDS) 6316 break; 6317 6318 mutex_lock(&kvm->lock); 6319 if (kvm->arch.max_vcpu_ids == cap->args[0]) { 6320 r = 0; 6321 } else if (!kvm->arch.max_vcpu_ids) { 6322 kvm->arch.max_vcpu_ids = cap->args[0]; 6323 r = 0; 6324 } 6325 mutex_unlock(&kvm->lock); 6326 break; 6327 case KVM_CAP_X86_NOTIFY_VMEXIT: 6328 r = -EINVAL; 6329 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS) 6330 break; 6331 if (!kvm_caps.has_notify_vmexit) 6332 break; 6333 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED)) 6334 break; 6335 mutex_lock(&kvm->lock); 6336 if (!kvm->created_vcpus) { 6337 kvm->arch.notify_window = cap->args[0] >> 32; 6338 kvm->arch.notify_vmexit_flags = (u32)cap->args[0]; 6339 r = 0; 6340 } 6341 mutex_unlock(&kvm->lock); 6342 break; 6343 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 6344 r = -EINVAL; 6345 6346 /* 6347 * Since the risk of disabling NX hugepages is a guest crashing 6348 * the system, ensure the userspace process has permission to 6349 * reboot the system. 6350 * 6351 * Note that unlike the reboot() syscall, the process must have 6352 * this capability in the root namespace because exposing 6353 * /dev/kvm into a container does not limit the scope of the 6354 * iTLB multihit bug to that container. In other words, 6355 * this must use capable(), not ns_capable(). 6356 */ 6357 if (!capable(CAP_SYS_BOOT)) { 6358 r = -EPERM; 6359 break; 6360 } 6361 6362 if (cap->args[0]) 6363 break; 6364 6365 mutex_lock(&kvm->lock); 6366 if (!kvm->created_vcpus) { 6367 kvm->arch.disable_nx_huge_pages = true; 6368 r = 0; 6369 } 6370 mutex_unlock(&kvm->lock); 6371 break; 6372 default: 6373 r = -EINVAL; 6374 break; 6375 } 6376 return r; 6377 } 6378 6379 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow) 6380 { 6381 struct kvm_x86_msr_filter *msr_filter; 6382 6383 msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT); 6384 if (!msr_filter) 6385 return NULL; 6386 6387 msr_filter->default_allow = default_allow; 6388 return msr_filter; 6389 } 6390 6391 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter) 6392 { 6393 u32 i; 6394 6395 if (!msr_filter) 6396 return; 6397 6398 for (i = 0; i < msr_filter->count; i++) 6399 kfree(msr_filter->ranges[i].bitmap); 6400 6401 kfree(msr_filter); 6402 } 6403 6404 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, 6405 struct kvm_msr_filter_range *user_range) 6406 { 6407 unsigned long *bitmap = NULL; 6408 size_t bitmap_size; 6409 6410 if (!user_range->nmsrs) 6411 return 0; 6412 6413 if (user_range->flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)) 6414 return -EINVAL; 6415 6416 if (!user_range->flags) 6417 return -EINVAL; 6418 6419 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); 6420 if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE) 6421 return -EINVAL; 6422 6423 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); 6424 if (IS_ERR(bitmap)) 6425 return PTR_ERR(bitmap); 6426 6427 msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) { 6428 .flags = user_range->flags, 6429 .base = user_range->base, 6430 .nmsrs = user_range->nmsrs, 6431 .bitmap = bitmap, 6432 }; 6433 6434 msr_filter->count++; 6435 return 0; 6436 } 6437 6438 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, 6439 struct kvm_msr_filter *filter) 6440 { 6441 struct kvm_x86_msr_filter *new_filter, *old_filter; 6442 bool default_allow; 6443 bool empty = true; 6444 int r = 0; 6445 u32 i; 6446 6447 if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY) 6448 return -EINVAL; 6449 6450 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) 6451 empty &= !filter->ranges[i].nmsrs; 6452 6453 default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); 6454 if (empty && !default_allow) 6455 return -EINVAL; 6456 6457 new_filter = kvm_alloc_msr_filter(default_allow); 6458 if (!new_filter) 6459 return -ENOMEM; 6460 6461 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { 6462 r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); 6463 if (r) { 6464 kvm_free_msr_filter(new_filter); 6465 return r; 6466 } 6467 } 6468 6469 mutex_lock(&kvm->lock); 6470 6471 /* The per-VM filter is protected by kvm->lock... */ 6472 old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); 6473 6474 rcu_assign_pointer(kvm->arch.msr_filter, new_filter); 6475 synchronize_srcu(&kvm->srcu); 6476 6477 kvm_free_msr_filter(old_filter); 6478 6479 kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED); 6480 mutex_unlock(&kvm->lock); 6481 6482 return 0; 6483 } 6484 6485 #ifdef CONFIG_KVM_COMPAT 6486 /* for KVM_X86_SET_MSR_FILTER */ 6487 struct kvm_msr_filter_range_compat { 6488 __u32 flags; 6489 __u32 nmsrs; 6490 __u32 base; 6491 __u32 bitmap; 6492 }; 6493 6494 struct kvm_msr_filter_compat { 6495 __u32 flags; 6496 struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES]; 6497 }; 6498 6499 #define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat) 6500 6501 long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, 6502 unsigned long arg) 6503 { 6504 void __user *argp = (void __user *)arg; 6505 struct kvm *kvm = filp->private_data; 6506 long r = -ENOTTY; 6507 6508 switch (ioctl) { 6509 case KVM_X86_SET_MSR_FILTER_COMPAT: { 6510 struct kvm_msr_filter __user *user_msr_filter = argp; 6511 struct kvm_msr_filter_compat filter_compat; 6512 struct kvm_msr_filter filter; 6513 int i; 6514 6515 if (copy_from_user(&filter_compat, user_msr_filter, 6516 sizeof(filter_compat))) 6517 return -EFAULT; 6518 6519 filter.flags = filter_compat.flags; 6520 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { 6521 struct kvm_msr_filter_range_compat *cr; 6522 6523 cr = &filter_compat.ranges[i]; 6524 filter.ranges[i] = (struct kvm_msr_filter_range) { 6525 .flags = cr->flags, 6526 .nmsrs = cr->nmsrs, 6527 .base = cr->base, 6528 .bitmap = (__u8 *)(ulong)cr->bitmap, 6529 }; 6530 } 6531 6532 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); 6533 break; 6534 } 6535 } 6536 6537 return r; 6538 } 6539 #endif 6540 6541 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 6542 static int kvm_arch_suspend_notifier(struct kvm *kvm) 6543 { 6544 struct kvm_vcpu *vcpu; 6545 unsigned long i; 6546 int ret = 0; 6547 6548 mutex_lock(&kvm->lock); 6549 kvm_for_each_vcpu(i, vcpu, kvm) { 6550 if (!vcpu->arch.pv_time.active) 6551 continue; 6552 6553 ret = kvm_set_guest_paused(vcpu); 6554 if (ret) { 6555 kvm_err("Failed to pause guest VCPU%d: %d\n", 6556 vcpu->vcpu_id, ret); 6557 break; 6558 } 6559 } 6560 mutex_unlock(&kvm->lock); 6561 6562 return ret ? NOTIFY_BAD : NOTIFY_DONE; 6563 } 6564 6565 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state) 6566 { 6567 switch (state) { 6568 case PM_HIBERNATION_PREPARE: 6569 case PM_SUSPEND_PREPARE: 6570 return kvm_arch_suspend_notifier(kvm); 6571 } 6572 6573 return NOTIFY_DONE; 6574 } 6575 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 6576 6577 static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp) 6578 { 6579 struct kvm_clock_data data = { 0 }; 6580 6581 get_kvmclock(kvm, &data); 6582 if (copy_to_user(argp, &data, sizeof(data))) 6583 return -EFAULT; 6584 6585 return 0; 6586 } 6587 6588 static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp) 6589 { 6590 struct kvm_arch *ka = &kvm->arch; 6591 struct kvm_clock_data data; 6592 u64 now_raw_ns; 6593 6594 if (copy_from_user(&data, argp, sizeof(data))) 6595 return -EFAULT; 6596 6597 /* 6598 * Only KVM_CLOCK_REALTIME is used, but allow passing the 6599 * result of KVM_GET_CLOCK back to KVM_SET_CLOCK. 6600 */ 6601 if (data.flags & ~KVM_CLOCK_VALID_FLAGS) 6602 return -EINVAL; 6603 6604 kvm_hv_request_tsc_page_update(kvm); 6605 kvm_start_pvclock_update(kvm); 6606 pvclock_update_vm_gtod_copy(kvm); 6607 6608 /* 6609 * This pairs with kvm_guest_time_update(): when masterclock is 6610 * in use, we use master_kernel_ns + kvmclock_offset to set 6611 * unsigned 'system_time' so if we use get_kvmclock_ns() (which 6612 * is slightly ahead) here we risk going negative on unsigned 6613 * 'system_time' when 'data.clock' is very small. 6614 */ 6615 if (data.flags & KVM_CLOCK_REALTIME) { 6616 u64 now_real_ns = ktime_get_real_ns(); 6617 6618 /* 6619 * Avoid stepping the kvmclock backwards. 6620 */ 6621 if (now_real_ns > data.realtime) 6622 data.clock += now_real_ns - data.realtime; 6623 } 6624 6625 if (ka->use_master_clock) 6626 now_raw_ns = ka->master_kernel_ns; 6627 else 6628 now_raw_ns = get_kvmclock_base_ns(); 6629 ka->kvmclock_offset = data.clock - now_raw_ns; 6630 kvm_end_pvclock_update(kvm); 6631 return 0; 6632 } 6633 6634 long kvm_arch_vm_ioctl(struct file *filp, 6635 unsigned int ioctl, unsigned long arg) 6636 { 6637 struct kvm *kvm = filp->private_data; 6638 void __user *argp = (void __user *)arg; 6639 int r = -ENOTTY; 6640 /* 6641 * This union makes it completely explicit to gcc-3.x 6642 * that these two variables' stack usage should be 6643 * combined, not added together. 6644 */ 6645 union { 6646 struct kvm_pit_state ps; 6647 struct kvm_pit_state2 ps2; 6648 struct kvm_pit_config pit_config; 6649 } u; 6650 6651 switch (ioctl) { 6652 case KVM_SET_TSS_ADDR: 6653 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); 6654 break; 6655 case KVM_SET_IDENTITY_MAP_ADDR: { 6656 u64 ident_addr; 6657 6658 mutex_lock(&kvm->lock); 6659 r = -EINVAL; 6660 if (kvm->created_vcpus) 6661 goto set_identity_unlock; 6662 r = -EFAULT; 6663 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr))) 6664 goto set_identity_unlock; 6665 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 6666 set_identity_unlock: 6667 mutex_unlock(&kvm->lock); 6668 break; 6669 } 6670 case KVM_SET_NR_MMU_PAGES: 6671 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); 6672 break; 6673 case KVM_GET_NR_MMU_PAGES: 6674 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); 6675 break; 6676 case KVM_CREATE_IRQCHIP: { 6677 mutex_lock(&kvm->lock); 6678 6679 r = -EEXIST; 6680 if (irqchip_in_kernel(kvm)) 6681 goto create_irqchip_unlock; 6682 6683 r = -EINVAL; 6684 if (kvm->created_vcpus) 6685 goto create_irqchip_unlock; 6686 6687 r = kvm_pic_init(kvm); 6688 if (r) 6689 goto create_irqchip_unlock; 6690 6691 r = kvm_ioapic_init(kvm); 6692 if (r) { 6693 kvm_pic_destroy(kvm); 6694 goto create_irqchip_unlock; 6695 } 6696 6697 r = kvm_setup_default_irq_routing(kvm); 6698 if (r) { 6699 kvm_ioapic_destroy(kvm); 6700 kvm_pic_destroy(kvm); 6701 goto create_irqchip_unlock; 6702 } 6703 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */ 6704 smp_wmb(); 6705 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; 6706 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6707 create_irqchip_unlock: 6708 mutex_unlock(&kvm->lock); 6709 break; 6710 } 6711 case KVM_CREATE_PIT: 6712 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; 6713 goto create_pit; 6714 case KVM_CREATE_PIT2: 6715 r = -EFAULT; 6716 if (copy_from_user(&u.pit_config, argp, 6717 sizeof(struct kvm_pit_config))) 6718 goto out; 6719 create_pit: 6720 mutex_lock(&kvm->lock); 6721 r = -EEXIST; 6722 if (kvm->arch.vpit) 6723 goto create_pit_unlock; 6724 r = -ENOMEM; 6725 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); 6726 if (kvm->arch.vpit) 6727 r = 0; 6728 create_pit_unlock: 6729 mutex_unlock(&kvm->lock); 6730 break; 6731 case KVM_GET_IRQCHIP: { 6732 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6733 struct kvm_irqchip *chip; 6734 6735 chip = memdup_user(argp, sizeof(*chip)); 6736 if (IS_ERR(chip)) { 6737 r = PTR_ERR(chip); 6738 goto out; 6739 } 6740 6741 r = -ENXIO; 6742 if (!irqchip_kernel(kvm)) 6743 goto get_irqchip_out; 6744 r = kvm_vm_ioctl_get_irqchip(kvm, chip); 6745 if (r) 6746 goto get_irqchip_out; 6747 r = -EFAULT; 6748 if (copy_to_user(argp, chip, sizeof(*chip))) 6749 goto get_irqchip_out; 6750 r = 0; 6751 get_irqchip_out: 6752 kfree(chip); 6753 break; 6754 } 6755 case KVM_SET_IRQCHIP: { 6756 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6757 struct kvm_irqchip *chip; 6758 6759 chip = memdup_user(argp, sizeof(*chip)); 6760 if (IS_ERR(chip)) { 6761 r = PTR_ERR(chip); 6762 goto out; 6763 } 6764 6765 r = -ENXIO; 6766 if (!irqchip_kernel(kvm)) 6767 goto set_irqchip_out; 6768 r = kvm_vm_ioctl_set_irqchip(kvm, chip); 6769 set_irqchip_out: 6770 kfree(chip); 6771 break; 6772 } 6773 case KVM_GET_PIT: { 6774 r = -EFAULT; 6775 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) 6776 goto out; 6777 r = -ENXIO; 6778 if (!kvm->arch.vpit) 6779 goto out; 6780 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); 6781 if (r) 6782 goto out; 6783 r = -EFAULT; 6784 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) 6785 goto out; 6786 r = 0; 6787 break; 6788 } 6789 case KVM_SET_PIT: { 6790 r = -EFAULT; 6791 if (copy_from_user(&u.ps, argp, sizeof(u.ps))) 6792 goto out; 6793 mutex_lock(&kvm->lock); 6794 r = -ENXIO; 6795 if (!kvm->arch.vpit) 6796 goto set_pit_out; 6797 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); 6798 set_pit_out: 6799 mutex_unlock(&kvm->lock); 6800 break; 6801 } 6802 case KVM_GET_PIT2: { 6803 r = -ENXIO; 6804 if (!kvm->arch.vpit) 6805 goto out; 6806 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); 6807 if (r) 6808 goto out; 6809 r = -EFAULT; 6810 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) 6811 goto out; 6812 r = 0; 6813 break; 6814 } 6815 case KVM_SET_PIT2: { 6816 r = -EFAULT; 6817 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) 6818 goto out; 6819 mutex_lock(&kvm->lock); 6820 r = -ENXIO; 6821 if (!kvm->arch.vpit) 6822 goto set_pit2_out; 6823 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); 6824 set_pit2_out: 6825 mutex_unlock(&kvm->lock); 6826 break; 6827 } 6828 case KVM_REINJECT_CONTROL: { 6829 struct kvm_reinject_control control; 6830 r = -EFAULT; 6831 if (copy_from_user(&control, argp, sizeof(control))) 6832 goto out; 6833 r = -ENXIO; 6834 if (!kvm->arch.vpit) 6835 goto out; 6836 r = kvm_vm_ioctl_reinject(kvm, &control); 6837 break; 6838 } 6839 case KVM_SET_BOOT_CPU_ID: 6840 r = 0; 6841 mutex_lock(&kvm->lock); 6842 if (kvm->created_vcpus) 6843 r = -EBUSY; 6844 else 6845 kvm->arch.bsp_vcpu_id = arg; 6846 mutex_unlock(&kvm->lock); 6847 break; 6848 #ifdef CONFIG_KVM_XEN 6849 case KVM_XEN_HVM_CONFIG: { 6850 struct kvm_xen_hvm_config xhc; 6851 r = -EFAULT; 6852 if (copy_from_user(&xhc, argp, sizeof(xhc))) 6853 goto out; 6854 r = kvm_xen_hvm_config(kvm, &xhc); 6855 break; 6856 } 6857 case KVM_XEN_HVM_GET_ATTR: { 6858 struct kvm_xen_hvm_attr xha; 6859 6860 r = -EFAULT; 6861 if (copy_from_user(&xha, argp, sizeof(xha))) 6862 goto out; 6863 r = kvm_xen_hvm_get_attr(kvm, &xha); 6864 if (!r && copy_to_user(argp, &xha, sizeof(xha))) 6865 r = -EFAULT; 6866 break; 6867 } 6868 case KVM_XEN_HVM_SET_ATTR: { 6869 struct kvm_xen_hvm_attr xha; 6870 6871 r = -EFAULT; 6872 if (copy_from_user(&xha, argp, sizeof(xha))) 6873 goto out; 6874 r = kvm_xen_hvm_set_attr(kvm, &xha); 6875 break; 6876 } 6877 case KVM_XEN_HVM_EVTCHN_SEND: { 6878 struct kvm_irq_routing_xen_evtchn uxe; 6879 6880 r = -EFAULT; 6881 if (copy_from_user(&uxe, argp, sizeof(uxe))) 6882 goto out; 6883 r = kvm_xen_hvm_evtchn_send(kvm, &uxe); 6884 break; 6885 } 6886 #endif 6887 case KVM_SET_CLOCK: 6888 r = kvm_vm_ioctl_set_clock(kvm, argp); 6889 break; 6890 case KVM_GET_CLOCK: 6891 r = kvm_vm_ioctl_get_clock(kvm, argp); 6892 break; 6893 case KVM_SET_TSC_KHZ: { 6894 u32 user_tsc_khz; 6895 6896 r = -EINVAL; 6897 user_tsc_khz = (u32)arg; 6898 6899 if (kvm_caps.has_tsc_control && 6900 user_tsc_khz >= kvm_caps.max_guest_tsc_khz) 6901 goto out; 6902 6903 if (user_tsc_khz == 0) 6904 user_tsc_khz = tsc_khz; 6905 6906 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz); 6907 r = 0; 6908 6909 goto out; 6910 } 6911 case KVM_GET_TSC_KHZ: { 6912 r = READ_ONCE(kvm->arch.default_tsc_khz); 6913 goto out; 6914 } 6915 case KVM_MEMORY_ENCRYPT_OP: { 6916 r = -ENOTTY; 6917 if (!kvm_x86_ops.mem_enc_ioctl) 6918 goto out; 6919 6920 r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp); 6921 break; 6922 } 6923 case KVM_MEMORY_ENCRYPT_REG_REGION: { 6924 struct kvm_enc_region region; 6925 6926 r = -EFAULT; 6927 if (copy_from_user(®ion, argp, sizeof(region))) 6928 goto out; 6929 6930 r = -ENOTTY; 6931 if (!kvm_x86_ops.mem_enc_register_region) 6932 goto out; 6933 6934 r = static_call(kvm_x86_mem_enc_register_region)(kvm, ®ion); 6935 break; 6936 } 6937 case KVM_MEMORY_ENCRYPT_UNREG_REGION: { 6938 struct kvm_enc_region region; 6939 6940 r = -EFAULT; 6941 if (copy_from_user(®ion, argp, sizeof(region))) 6942 goto out; 6943 6944 r = -ENOTTY; 6945 if (!kvm_x86_ops.mem_enc_unregister_region) 6946 goto out; 6947 6948 r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, ®ion); 6949 break; 6950 } 6951 case KVM_HYPERV_EVENTFD: { 6952 struct kvm_hyperv_eventfd hvevfd; 6953 6954 r = -EFAULT; 6955 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd))) 6956 goto out; 6957 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd); 6958 break; 6959 } 6960 case KVM_SET_PMU_EVENT_FILTER: 6961 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); 6962 break; 6963 case KVM_X86_SET_MSR_FILTER: { 6964 struct kvm_msr_filter __user *user_msr_filter = argp; 6965 struct kvm_msr_filter filter; 6966 6967 if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) 6968 return -EFAULT; 6969 6970 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); 6971 break; 6972 } 6973 default: 6974 r = -ENOTTY; 6975 } 6976 out: 6977 return r; 6978 } 6979 6980 static void kvm_init_msr_list(void) 6981 { 6982 u32 dummy[2]; 6983 unsigned i; 6984 6985 BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3, 6986 "Please update the fixed PMCs in msrs_to_saved_all[]"); 6987 6988 num_msrs_to_save = 0; 6989 num_emulated_msrs = 0; 6990 num_msr_based_features = 0; 6991 6992 for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) { 6993 if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0) 6994 continue; 6995 6996 /* 6997 * Even MSRs that are valid in the host may not be exposed 6998 * to the guests in some cases. 6999 */ 7000 switch (msrs_to_save_all[i]) { 7001 case MSR_IA32_BNDCFGS: 7002 if (!kvm_mpx_supported()) 7003 continue; 7004 break; 7005 case MSR_TSC_AUX: 7006 if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) && 7007 !kvm_cpu_cap_has(X86_FEATURE_RDPID)) 7008 continue; 7009 break; 7010 case MSR_IA32_UMWAIT_CONTROL: 7011 if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) 7012 continue; 7013 break; 7014 case MSR_IA32_RTIT_CTL: 7015 case MSR_IA32_RTIT_STATUS: 7016 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) 7017 continue; 7018 break; 7019 case MSR_IA32_RTIT_CR3_MATCH: 7020 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7021 !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering)) 7022 continue; 7023 break; 7024 case MSR_IA32_RTIT_OUTPUT_BASE: 7025 case MSR_IA32_RTIT_OUTPUT_MASK: 7026 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7027 (!intel_pt_validate_hw_cap(PT_CAP_topa_output) && 7028 !intel_pt_validate_hw_cap(PT_CAP_single_range_output))) 7029 continue; 7030 break; 7031 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 7032 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7033 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >= 7034 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2) 7035 continue; 7036 break; 7037 case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX: 7038 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= 7039 min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp)) 7040 continue; 7041 break; 7042 case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX: 7043 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= 7044 min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp)) 7045 continue; 7046 break; 7047 case MSR_IA32_XFD: 7048 case MSR_IA32_XFD_ERR: 7049 if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) 7050 continue; 7051 break; 7052 default: 7053 break; 7054 } 7055 7056 msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i]; 7057 } 7058 7059 for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { 7060 if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i])) 7061 continue; 7062 7063 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; 7064 } 7065 7066 for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) { 7067 struct kvm_msr_entry msr; 7068 7069 msr.index = msr_based_features_all[i]; 7070 if (kvm_get_msr_feature(&msr)) 7071 continue; 7072 7073 msr_based_features[num_msr_based_features++] = msr_based_features_all[i]; 7074 } 7075 } 7076 7077 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, 7078 const void *v) 7079 { 7080 int handled = 0; 7081 int n; 7082 7083 do { 7084 n = min(len, 8); 7085 if (!(lapic_in_kernel(vcpu) && 7086 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) 7087 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) 7088 break; 7089 handled += n; 7090 addr += n; 7091 len -= n; 7092 v += n; 7093 } while (len); 7094 7095 return handled; 7096 } 7097 7098 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) 7099 { 7100 int handled = 0; 7101 int n; 7102 7103 do { 7104 n = min(len, 8); 7105 if (!(lapic_in_kernel(vcpu) && 7106 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, 7107 addr, n, v)) 7108 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) 7109 break; 7110 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); 7111 handled += n; 7112 addr += n; 7113 len -= n; 7114 v += n; 7115 } while (len); 7116 7117 return handled; 7118 } 7119 7120 void kvm_set_segment(struct kvm_vcpu *vcpu, 7121 struct kvm_segment *var, int seg) 7122 { 7123 static_call(kvm_x86_set_segment)(vcpu, var, seg); 7124 } 7125 7126 void kvm_get_segment(struct kvm_vcpu *vcpu, 7127 struct kvm_segment *var, int seg) 7128 { 7129 static_call(kvm_x86_get_segment)(vcpu, var, seg); 7130 } 7131 7132 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, 7133 struct x86_exception *exception) 7134 { 7135 struct kvm_mmu *mmu = vcpu->arch.mmu; 7136 gpa_t t_gpa; 7137 7138 BUG_ON(!mmu_is_nested(vcpu)); 7139 7140 /* NPT walks are always user-walks */ 7141 access |= PFERR_USER_MASK; 7142 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception); 7143 7144 return t_gpa; 7145 } 7146 7147 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 7148 struct x86_exception *exception) 7149 { 7150 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7151 7152 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7153 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7154 } 7155 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); 7156 7157 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 7158 struct x86_exception *exception) 7159 { 7160 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7161 7162 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7163 access |= PFERR_WRITE_MASK; 7164 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7165 } 7166 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); 7167 7168 /* uses this to access any guest's mapped memory without checking CPL */ 7169 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, 7170 struct x86_exception *exception) 7171 { 7172 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7173 7174 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); 7175 } 7176 7177 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 7178 struct kvm_vcpu *vcpu, u64 access, 7179 struct x86_exception *exception) 7180 { 7181 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7182 void *data = val; 7183 int r = X86EMUL_CONTINUE; 7184 7185 while (bytes) { 7186 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 7187 unsigned offset = addr & (PAGE_SIZE-1); 7188 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 7189 int ret; 7190 7191 if (gpa == INVALID_GPA) 7192 return X86EMUL_PROPAGATE_FAULT; 7193 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, 7194 offset, toread); 7195 if (ret < 0) { 7196 r = X86EMUL_IO_NEEDED; 7197 goto out; 7198 } 7199 7200 bytes -= toread; 7201 data += toread; 7202 addr += toread; 7203 } 7204 out: 7205 return r; 7206 } 7207 7208 /* used for instruction fetching */ 7209 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, 7210 gva_t addr, void *val, unsigned int bytes, 7211 struct x86_exception *exception) 7212 { 7213 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7214 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7215 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7216 unsigned offset; 7217 int ret; 7218 7219 /* Inline kvm_read_guest_virt_helper for speed. */ 7220 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK, 7221 exception); 7222 if (unlikely(gpa == INVALID_GPA)) 7223 return X86EMUL_PROPAGATE_FAULT; 7224 7225 offset = addr & (PAGE_SIZE-1); 7226 if (WARN_ON(offset + bytes > PAGE_SIZE)) 7227 bytes = (unsigned)PAGE_SIZE - offset; 7228 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, 7229 offset, bytes); 7230 if (unlikely(ret < 0)) 7231 return X86EMUL_IO_NEEDED; 7232 7233 return X86EMUL_CONTINUE; 7234 } 7235 7236 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, 7237 gva_t addr, void *val, unsigned int bytes, 7238 struct x86_exception *exception) 7239 { 7240 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7241 7242 /* 7243 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED 7244 * is returned, but our callers are not ready for that and they blindly 7245 * call kvm_inject_page_fault. Ensure that they at least do not leak 7246 * uninitialized kernel stack memory into cr2 and error code. 7247 */ 7248 memset(exception, 0, sizeof(*exception)); 7249 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 7250 exception); 7251 } 7252 EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 7253 7254 static int emulator_read_std(struct x86_emulate_ctxt *ctxt, 7255 gva_t addr, void *val, unsigned int bytes, 7256 struct x86_exception *exception, bool system) 7257 { 7258 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7259 u64 access = 0; 7260 7261 if (system) 7262 access |= PFERR_IMPLICIT_ACCESS; 7263 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7264 access |= PFERR_USER_MASK; 7265 7266 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); 7267 } 7268 7269 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 7270 struct kvm_vcpu *vcpu, u64 access, 7271 struct x86_exception *exception) 7272 { 7273 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7274 void *data = val; 7275 int r = X86EMUL_CONTINUE; 7276 7277 while (bytes) { 7278 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 7279 unsigned offset = addr & (PAGE_SIZE-1); 7280 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 7281 int ret; 7282 7283 if (gpa == INVALID_GPA) 7284 return X86EMUL_PROPAGATE_FAULT; 7285 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); 7286 if (ret < 0) { 7287 r = X86EMUL_IO_NEEDED; 7288 goto out; 7289 } 7290 7291 bytes -= towrite; 7292 data += towrite; 7293 addr += towrite; 7294 } 7295 out: 7296 return r; 7297 } 7298 7299 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, 7300 unsigned int bytes, struct x86_exception *exception, 7301 bool system) 7302 { 7303 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7304 u64 access = PFERR_WRITE_MASK; 7305 7306 if (system) 7307 access |= PFERR_IMPLICIT_ACCESS; 7308 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7309 access |= PFERR_USER_MASK; 7310 7311 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7312 access, exception); 7313 } 7314 7315 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, 7316 unsigned int bytes, struct x86_exception *exception) 7317 { 7318 /* kvm_write_guest_virt_system can pull in tons of pages. */ 7319 vcpu->arch.l1tf_flush_l1d = true; 7320 7321 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7322 PFERR_WRITE_MASK, exception); 7323 } 7324 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 7325 7326 static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type, 7327 void *insn, int insn_len) 7328 { 7329 return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type, 7330 insn, insn_len); 7331 } 7332 7333 int handle_ud(struct kvm_vcpu *vcpu) 7334 { 7335 static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX }; 7336 int fep_flags = READ_ONCE(force_emulation_prefix); 7337 int emul_type = EMULTYPE_TRAP_UD; 7338 char sig[5]; /* ud2; .ascii "kvm" */ 7339 struct x86_exception e; 7340 7341 if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0))) 7342 return 1; 7343 7344 if (fep_flags && 7345 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), 7346 sig, sizeof(sig), &e) == 0 && 7347 memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) { 7348 if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF) 7349 kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF); 7350 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); 7351 emul_type = EMULTYPE_TRAP_UD_FORCED; 7352 } 7353 7354 return kvm_emulate_instruction(vcpu, emul_type); 7355 } 7356 EXPORT_SYMBOL_GPL(handle_ud); 7357 7358 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7359 gpa_t gpa, bool write) 7360 { 7361 /* For APIC access vmexit */ 7362 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7363 return 1; 7364 7365 if (vcpu_match_mmio_gpa(vcpu, gpa)) { 7366 trace_vcpu_match_mmio(gva, gpa, write, true); 7367 return 1; 7368 } 7369 7370 return 0; 7371 } 7372 7373 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7374 gpa_t *gpa, struct x86_exception *exception, 7375 bool write) 7376 { 7377 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7378 u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) 7379 | (write ? PFERR_WRITE_MASK : 0); 7380 7381 /* 7382 * currently PKRU is only applied to ept enabled guest so 7383 * there is no pkey in EPT page table for L1 guest or EPT 7384 * shadow page table for L2 guest. 7385 */ 7386 if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) || 7387 !permission_fault(vcpu, vcpu->arch.walk_mmu, 7388 vcpu->arch.mmio_access, 0, access))) { 7389 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | 7390 (gva & (PAGE_SIZE - 1)); 7391 trace_vcpu_match_mmio(gva, *gpa, write, false); 7392 return 1; 7393 } 7394 7395 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7396 7397 if (*gpa == INVALID_GPA) 7398 return -1; 7399 7400 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); 7401 } 7402 7403 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 7404 const void *val, int bytes) 7405 { 7406 int ret; 7407 7408 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); 7409 if (ret < 0) 7410 return 0; 7411 kvm_page_track_write(vcpu, gpa, val, bytes); 7412 return 1; 7413 } 7414 7415 struct read_write_emulator_ops { 7416 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, 7417 int bytes); 7418 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, 7419 void *val, int bytes); 7420 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 7421 int bytes, void *val); 7422 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 7423 void *val, int bytes); 7424 bool write; 7425 }; 7426 7427 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) 7428 { 7429 if (vcpu->mmio_read_completed) { 7430 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 7431 vcpu->mmio_fragments[0].gpa, val); 7432 vcpu->mmio_read_completed = 0; 7433 return 1; 7434 } 7435 7436 return 0; 7437 } 7438 7439 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 7440 void *val, int bytes) 7441 { 7442 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); 7443 } 7444 7445 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 7446 void *val, int bytes) 7447 { 7448 return emulator_write_phys(vcpu, gpa, val, bytes); 7449 } 7450 7451 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) 7452 { 7453 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val); 7454 return vcpu_mmio_write(vcpu, gpa, bytes, val); 7455 } 7456 7457 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 7458 void *val, int bytes) 7459 { 7460 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL); 7461 return X86EMUL_IO_NEEDED; 7462 } 7463 7464 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 7465 void *val, int bytes) 7466 { 7467 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 7468 7469 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 7470 return X86EMUL_CONTINUE; 7471 } 7472 7473 static const struct read_write_emulator_ops read_emultor = { 7474 .read_write_prepare = read_prepare, 7475 .read_write_emulate = read_emulate, 7476 .read_write_mmio = vcpu_mmio_read, 7477 .read_write_exit_mmio = read_exit_mmio, 7478 }; 7479 7480 static const struct read_write_emulator_ops write_emultor = { 7481 .read_write_emulate = write_emulate, 7482 .read_write_mmio = write_mmio, 7483 .read_write_exit_mmio = write_exit_mmio, 7484 .write = true, 7485 }; 7486 7487 static int emulator_read_write_onepage(unsigned long addr, void *val, 7488 unsigned int bytes, 7489 struct x86_exception *exception, 7490 struct kvm_vcpu *vcpu, 7491 const struct read_write_emulator_ops *ops) 7492 { 7493 gpa_t gpa; 7494 int handled, ret; 7495 bool write = ops->write; 7496 struct kvm_mmio_fragment *frag; 7497 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 7498 7499 /* 7500 * If the exit was due to a NPF we may already have a GPA. 7501 * If the GPA is present, use it to avoid the GVA to GPA table walk. 7502 * Note, this cannot be used on string operations since string 7503 * operation using rep will only have the initial GPA from the NPF 7504 * occurred. 7505 */ 7506 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && 7507 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { 7508 gpa = ctxt->gpa_val; 7509 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); 7510 } else { 7511 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); 7512 if (ret < 0) 7513 return X86EMUL_PROPAGATE_FAULT; 7514 } 7515 7516 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) 7517 return X86EMUL_CONTINUE; 7518 7519 /* 7520 * Is this MMIO handled locally? 7521 */ 7522 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); 7523 if (handled == bytes) 7524 return X86EMUL_CONTINUE; 7525 7526 gpa += handled; 7527 bytes -= handled; 7528 val += handled; 7529 7530 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); 7531 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 7532 frag->gpa = gpa; 7533 frag->data = val; 7534 frag->len = bytes; 7535 return X86EMUL_CONTINUE; 7536 } 7537 7538 static int emulator_read_write(struct x86_emulate_ctxt *ctxt, 7539 unsigned long addr, 7540 void *val, unsigned int bytes, 7541 struct x86_exception *exception, 7542 const struct read_write_emulator_ops *ops) 7543 { 7544 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7545 gpa_t gpa; 7546 int rc; 7547 7548 if (ops->read_write_prepare && 7549 ops->read_write_prepare(vcpu, val, bytes)) 7550 return X86EMUL_CONTINUE; 7551 7552 vcpu->mmio_nr_fragments = 0; 7553 7554 /* Crossing a page boundary? */ 7555 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 7556 int now; 7557 7558 now = -addr & ~PAGE_MASK; 7559 rc = emulator_read_write_onepage(addr, val, now, exception, 7560 vcpu, ops); 7561 7562 if (rc != X86EMUL_CONTINUE) 7563 return rc; 7564 addr += now; 7565 if (ctxt->mode != X86EMUL_MODE_PROT64) 7566 addr = (u32)addr; 7567 val += now; 7568 bytes -= now; 7569 } 7570 7571 rc = emulator_read_write_onepage(addr, val, bytes, exception, 7572 vcpu, ops); 7573 if (rc != X86EMUL_CONTINUE) 7574 return rc; 7575 7576 if (!vcpu->mmio_nr_fragments) 7577 return rc; 7578 7579 gpa = vcpu->mmio_fragments[0].gpa; 7580 7581 vcpu->mmio_needed = 1; 7582 vcpu->mmio_cur_fragment = 0; 7583 7584 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); 7585 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 7586 vcpu->run->exit_reason = KVM_EXIT_MMIO; 7587 vcpu->run->mmio.phys_addr = gpa; 7588 7589 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 7590 } 7591 7592 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 7593 unsigned long addr, 7594 void *val, 7595 unsigned int bytes, 7596 struct x86_exception *exception) 7597 { 7598 return emulator_read_write(ctxt, addr, val, bytes, 7599 exception, &read_emultor); 7600 } 7601 7602 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, 7603 unsigned long addr, 7604 const void *val, 7605 unsigned int bytes, 7606 struct x86_exception *exception) 7607 { 7608 return emulator_read_write(ctxt, addr, (void *)val, bytes, 7609 exception, &write_emultor); 7610 } 7611 7612 #define emulator_try_cmpxchg_user(t, ptr, old, new) \ 7613 (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t)) 7614 7615 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, 7616 unsigned long addr, 7617 const void *old, 7618 const void *new, 7619 unsigned int bytes, 7620 struct x86_exception *exception) 7621 { 7622 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7623 u64 page_line_mask; 7624 unsigned long hva; 7625 gpa_t gpa; 7626 int r; 7627 7628 /* guests cmpxchg8b have to be emulated atomically */ 7629 if (bytes > 8 || (bytes & (bytes - 1))) 7630 goto emul_write; 7631 7632 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); 7633 7634 if (gpa == INVALID_GPA || 7635 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7636 goto emul_write; 7637 7638 /* 7639 * Emulate the atomic as a straight write to avoid #AC if SLD is 7640 * enabled in the host and the access splits a cache line. 7641 */ 7642 if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) 7643 page_line_mask = ~(cache_line_size() - 1); 7644 else 7645 page_line_mask = PAGE_MASK; 7646 7647 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) 7648 goto emul_write; 7649 7650 hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa)); 7651 if (kvm_is_error_hva(hva)) 7652 goto emul_write; 7653 7654 hva += offset_in_page(gpa); 7655 7656 switch (bytes) { 7657 case 1: 7658 r = emulator_try_cmpxchg_user(u8, hva, old, new); 7659 break; 7660 case 2: 7661 r = emulator_try_cmpxchg_user(u16, hva, old, new); 7662 break; 7663 case 4: 7664 r = emulator_try_cmpxchg_user(u32, hva, old, new); 7665 break; 7666 case 8: 7667 r = emulator_try_cmpxchg_user(u64, hva, old, new); 7668 break; 7669 default: 7670 BUG(); 7671 } 7672 7673 if (r < 0) 7674 return X86EMUL_UNHANDLEABLE; 7675 if (r) 7676 return X86EMUL_CMPXCHG_FAILED; 7677 7678 kvm_page_track_write(vcpu, gpa, new, bytes); 7679 7680 return X86EMUL_CONTINUE; 7681 7682 emul_write: 7683 printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); 7684 7685 return emulator_write_emulated(ctxt, addr, new, bytes, exception); 7686 } 7687 7688 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, 7689 unsigned short port, void *data, 7690 unsigned int count, bool in) 7691 { 7692 unsigned i; 7693 int r; 7694 7695 WARN_ON_ONCE(vcpu->arch.pio.count); 7696 for (i = 0; i < count; i++) { 7697 if (in) 7698 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data); 7699 else 7700 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data); 7701 7702 if (r) { 7703 if (i == 0) 7704 goto userspace_io; 7705 7706 /* 7707 * Userspace must have unregistered the device while PIO 7708 * was running. Drop writes / read as 0. 7709 */ 7710 if (in) 7711 memset(data, 0, size * (count - i)); 7712 break; 7713 } 7714 7715 data += size; 7716 } 7717 return 1; 7718 7719 userspace_io: 7720 vcpu->arch.pio.port = port; 7721 vcpu->arch.pio.in = in; 7722 vcpu->arch.pio.count = count; 7723 vcpu->arch.pio.size = size; 7724 7725 if (in) 7726 memset(vcpu->arch.pio_data, 0, size * count); 7727 else 7728 memcpy(vcpu->arch.pio_data, data, size * count); 7729 7730 vcpu->run->exit_reason = KVM_EXIT_IO; 7731 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 7732 vcpu->run->io.size = size; 7733 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 7734 vcpu->run->io.count = count; 7735 vcpu->run->io.port = port; 7736 return 0; 7737 } 7738 7739 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, 7740 unsigned short port, void *val, unsigned int count) 7741 { 7742 int r = emulator_pio_in_out(vcpu, size, port, val, count, true); 7743 if (r) 7744 trace_kvm_pio(KVM_PIO_IN, port, size, count, val); 7745 7746 return r; 7747 } 7748 7749 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val) 7750 { 7751 int size = vcpu->arch.pio.size; 7752 unsigned int count = vcpu->arch.pio.count; 7753 memcpy(val, vcpu->arch.pio_data, size * count); 7754 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); 7755 vcpu->arch.pio.count = 0; 7756 } 7757 7758 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, 7759 int size, unsigned short port, void *val, 7760 unsigned int count) 7761 { 7762 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7763 if (vcpu->arch.pio.count) { 7764 /* 7765 * Complete a previous iteration that required userspace I/O. 7766 * Note, @count isn't guaranteed to match pio.count as userspace 7767 * can modify ECX before rerunning the vCPU. Ignore any such 7768 * shenanigans as KVM doesn't support modifying the rep count, 7769 * and the emulator ensures @count doesn't overflow the buffer. 7770 */ 7771 complete_emulator_pio_in(vcpu, val); 7772 return 1; 7773 } 7774 7775 return emulator_pio_in(vcpu, size, port, val, count); 7776 } 7777 7778 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size, 7779 unsigned short port, const void *val, 7780 unsigned int count) 7781 { 7782 trace_kvm_pio(KVM_PIO_OUT, port, size, count, val); 7783 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); 7784 } 7785 7786 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, 7787 int size, unsigned short port, 7788 const void *val, unsigned int count) 7789 { 7790 return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count); 7791 } 7792 7793 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 7794 { 7795 return static_call(kvm_x86_get_segment_base)(vcpu, seg); 7796 } 7797 7798 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) 7799 { 7800 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); 7801 } 7802 7803 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) 7804 { 7805 if (!need_emulate_wbinvd(vcpu)) 7806 return X86EMUL_CONTINUE; 7807 7808 if (static_call(kvm_x86_has_wbinvd_exit)()) { 7809 int cpu = get_cpu(); 7810 7811 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 7812 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, 7813 wbinvd_ipi, NULL, 1); 7814 put_cpu(); 7815 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 7816 } else 7817 wbinvd(); 7818 return X86EMUL_CONTINUE; 7819 } 7820 7821 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) 7822 { 7823 kvm_emulate_wbinvd_noskip(vcpu); 7824 return kvm_skip_emulated_instruction(vcpu); 7825 } 7826 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); 7827 7828 7829 7830 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) 7831 { 7832 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); 7833 } 7834 7835 static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, 7836 unsigned long *dest) 7837 { 7838 kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); 7839 } 7840 7841 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, 7842 unsigned long value) 7843 { 7844 7845 return kvm_set_dr(emul_to_vcpu(ctxt), dr, value); 7846 } 7847 7848 static u64 mk_cr_64(u64 curr_cr, u32 new_val) 7849 { 7850 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; 7851 } 7852 7853 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) 7854 { 7855 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7856 unsigned long value; 7857 7858 switch (cr) { 7859 case 0: 7860 value = kvm_read_cr0(vcpu); 7861 break; 7862 case 2: 7863 value = vcpu->arch.cr2; 7864 break; 7865 case 3: 7866 value = kvm_read_cr3(vcpu); 7867 break; 7868 case 4: 7869 value = kvm_read_cr4(vcpu); 7870 break; 7871 case 8: 7872 value = kvm_get_cr8(vcpu); 7873 break; 7874 default: 7875 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7876 return 0; 7877 } 7878 7879 return value; 7880 } 7881 7882 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) 7883 { 7884 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7885 int res = 0; 7886 7887 switch (cr) { 7888 case 0: 7889 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); 7890 break; 7891 case 2: 7892 vcpu->arch.cr2 = val; 7893 break; 7894 case 3: 7895 res = kvm_set_cr3(vcpu, val); 7896 break; 7897 case 4: 7898 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); 7899 break; 7900 case 8: 7901 res = kvm_set_cr8(vcpu, val); 7902 break; 7903 default: 7904 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7905 res = -1; 7906 } 7907 7908 return res; 7909 } 7910 7911 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) 7912 { 7913 return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt)); 7914 } 7915 7916 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7917 { 7918 static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt); 7919 } 7920 7921 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7922 { 7923 static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt); 7924 } 7925 7926 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7927 { 7928 static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt); 7929 } 7930 7931 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7932 { 7933 static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt); 7934 } 7935 7936 static unsigned long emulator_get_cached_segment_base( 7937 struct x86_emulate_ctxt *ctxt, int seg) 7938 { 7939 return get_segment_base(emul_to_vcpu(ctxt), seg); 7940 } 7941 7942 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, 7943 struct desc_struct *desc, u32 *base3, 7944 int seg) 7945 { 7946 struct kvm_segment var; 7947 7948 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); 7949 *selector = var.selector; 7950 7951 if (var.unusable) { 7952 memset(desc, 0, sizeof(*desc)); 7953 if (base3) 7954 *base3 = 0; 7955 return false; 7956 } 7957 7958 if (var.g) 7959 var.limit >>= 12; 7960 set_desc_limit(desc, var.limit); 7961 set_desc_base(desc, (unsigned long)var.base); 7962 #ifdef CONFIG_X86_64 7963 if (base3) 7964 *base3 = var.base >> 32; 7965 #endif 7966 desc->type = var.type; 7967 desc->s = var.s; 7968 desc->dpl = var.dpl; 7969 desc->p = var.present; 7970 desc->avl = var.avl; 7971 desc->l = var.l; 7972 desc->d = var.db; 7973 desc->g = var.g; 7974 7975 return true; 7976 } 7977 7978 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, 7979 struct desc_struct *desc, u32 base3, 7980 int seg) 7981 { 7982 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7983 struct kvm_segment var; 7984 7985 var.selector = selector; 7986 var.base = get_desc_base(desc); 7987 #ifdef CONFIG_X86_64 7988 var.base |= ((u64)base3) << 32; 7989 #endif 7990 var.limit = get_desc_limit(desc); 7991 if (desc->g) 7992 var.limit = (var.limit << 12) | 0xfff; 7993 var.type = desc->type; 7994 var.dpl = desc->dpl; 7995 var.db = desc->d; 7996 var.s = desc->s; 7997 var.l = desc->l; 7998 var.g = desc->g; 7999 var.avl = desc->avl; 8000 var.present = desc->p; 8001 var.unusable = !var.present; 8002 var.padding = 0; 8003 8004 kvm_set_segment(vcpu, &var, seg); 8005 return; 8006 } 8007 8008 static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt, 8009 u32 msr_index, u64 *pdata) 8010 { 8011 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8012 int r; 8013 8014 r = kvm_get_msr_with_filter(vcpu, msr_index, pdata); 8015 if (r < 0) 8016 return X86EMUL_UNHANDLEABLE; 8017 8018 if (r) { 8019 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, 8020 complete_emulated_rdmsr, r)) 8021 return X86EMUL_IO_NEEDED; 8022 8023 trace_kvm_msr_read_ex(msr_index); 8024 return X86EMUL_PROPAGATE_FAULT; 8025 } 8026 8027 trace_kvm_msr_read(msr_index, *pdata); 8028 return X86EMUL_CONTINUE; 8029 } 8030 8031 static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt, 8032 u32 msr_index, u64 data) 8033 { 8034 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8035 int r; 8036 8037 r = kvm_set_msr_with_filter(vcpu, msr_index, data); 8038 if (r < 0) 8039 return X86EMUL_UNHANDLEABLE; 8040 8041 if (r) { 8042 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data, 8043 complete_emulated_msr_access, r)) 8044 return X86EMUL_IO_NEEDED; 8045 8046 trace_kvm_msr_write_ex(msr_index, data); 8047 return X86EMUL_PROPAGATE_FAULT; 8048 } 8049 8050 trace_kvm_msr_write(msr_index, data); 8051 return X86EMUL_CONTINUE; 8052 } 8053 8054 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, 8055 u32 msr_index, u64 *pdata) 8056 { 8057 return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); 8058 } 8059 8060 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, 8061 u32 pmc) 8062 { 8063 if (kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc)) 8064 return 0; 8065 return -EINVAL; 8066 } 8067 8068 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, 8069 u32 pmc, u64 *pdata) 8070 { 8071 return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata); 8072 } 8073 8074 static void emulator_halt(struct x86_emulate_ctxt *ctxt) 8075 { 8076 emul_to_vcpu(ctxt)->arch.halt_request = 1; 8077 } 8078 8079 static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 8080 struct x86_instruction_info *info, 8081 enum x86_intercept_stage stage) 8082 { 8083 return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage, 8084 &ctxt->exception); 8085 } 8086 8087 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, 8088 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, 8089 bool exact_only) 8090 { 8091 return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only); 8092 } 8093 8094 static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt) 8095 { 8096 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM); 8097 } 8098 8099 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt) 8100 { 8101 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE); 8102 } 8103 8104 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) 8105 { 8106 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR); 8107 } 8108 8109 static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt) 8110 { 8111 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID); 8112 } 8113 8114 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) 8115 { 8116 return kvm_register_read_raw(emul_to_vcpu(ctxt), reg); 8117 } 8118 8119 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) 8120 { 8121 kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val); 8122 } 8123 8124 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) 8125 { 8126 static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked); 8127 } 8128 8129 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) 8130 { 8131 return emul_to_vcpu(ctxt)->arch.hflags; 8132 } 8133 8134 #ifndef CONFIG_KVM_SMM 8135 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt) 8136 { 8137 WARN_ON_ONCE(1); 8138 return X86EMUL_UNHANDLEABLE; 8139 } 8140 #endif 8141 8142 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt) 8143 { 8144 kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt)); 8145 } 8146 8147 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr) 8148 { 8149 return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr); 8150 } 8151 8152 static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt) 8153 { 8154 struct kvm *kvm = emul_to_vcpu(ctxt)->kvm; 8155 8156 if (!kvm->vm_bugged) 8157 kvm_vm_bugged(kvm); 8158 } 8159 8160 static const struct x86_emulate_ops emulate_ops = { 8161 .vm_bugged = emulator_vm_bugged, 8162 .read_gpr = emulator_read_gpr, 8163 .write_gpr = emulator_write_gpr, 8164 .read_std = emulator_read_std, 8165 .write_std = emulator_write_std, 8166 .fetch = kvm_fetch_guest_virt, 8167 .read_emulated = emulator_read_emulated, 8168 .write_emulated = emulator_write_emulated, 8169 .cmpxchg_emulated = emulator_cmpxchg_emulated, 8170 .invlpg = emulator_invlpg, 8171 .pio_in_emulated = emulator_pio_in_emulated, 8172 .pio_out_emulated = emulator_pio_out_emulated, 8173 .get_segment = emulator_get_segment, 8174 .set_segment = emulator_set_segment, 8175 .get_cached_segment_base = emulator_get_cached_segment_base, 8176 .get_gdt = emulator_get_gdt, 8177 .get_idt = emulator_get_idt, 8178 .set_gdt = emulator_set_gdt, 8179 .set_idt = emulator_set_idt, 8180 .get_cr = emulator_get_cr, 8181 .set_cr = emulator_set_cr, 8182 .cpl = emulator_get_cpl, 8183 .get_dr = emulator_get_dr, 8184 .set_dr = emulator_set_dr, 8185 .set_msr_with_filter = emulator_set_msr_with_filter, 8186 .get_msr_with_filter = emulator_get_msr_with_filter, 8187 .get_msr = emulator_get_msr, 8188 .check_pmc = emulator_check_pmc, 8189 .read_pmc = emulator_read_pmc, 8190 .halt = emulator_halt, 8191 .wbinvd = emulator_wbinvd, 8192 .fix_hypercall = emulator_fix_hypercall, 8193 .intercept = emulator_intercept, 8194 .get_cpuid = emulator_get_cpuid, 8195 .guest_has_long_mode = emulator_guest_has_long_mode, 8196 .guest_has_movbe = emulator_guest_has_movbe, 8197 .guest_has_fxsr = emulator_guest_has_fxsr, 8198 .guest_has_rdpid = emulator_guest_has_rdpid, 8199 .set_nmi_mask = emulator_set_nmi_mask, 8200 .get_hflags = emulator_get_hflags, 8201 .leave_smm = emulator_leave_smm, 8202 .triple_fault = emulator_triple_fault, 8203 .set_xcr = emulator_set_xcr, 8204 }; 8205 8206 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) 8207 { 8208 u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 8209 /* 8210 * an sti; sti; sequence only disable interrupts for the first 8211 * instruction. So, if the last instruction, be it emulated or 8212 * not, left the system with the INT_STI flag enabled, it 8213 * means that the last instruction is an sti. We should not 8214 * leave the flag on in this case. The same goes for mov ss 8215 */ 8216 if (int_shadow & mask) 8217 mask = 0; 8218 if (unlikely(int_shadow || mask)) { 8219 static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); 8220 if (!mask) 8221 kvm_make_request(KVM_REQ_EVENT, vcpu); 8222 } 8223 } 8224 8225 static void inject_emulated_exception(struct kvm_vcpu *vcpu) 8226 { 8227 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8228 8229 if (ctxt->exception.vector == PF_VECTOR) 8230 kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); 8231 else if (ctxt->exception.error_code_valid) 8232 kvm_queue_exception_e(vcpu, ctxt->exception.vector, 8233 ctxt->exception.error_code); 8234 else 8235 kvm_queue_exception(vcpu, ctxt->exception.vector); 8236 } 8237 8238 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu) 8239 { 8240 struct x86_emulate_ctxt *ctxt; 8241 8242 ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT); 8243 if (!ctxt) { 8244 pr_err("kvm: failed to allocate vcpu's emulator\n"); 8245 return NULL; 8246 } 8247 8248 ctxt->vcpu = vcpu; 8249 ctxt->ops = &emulate_ops; 8250 vcpu->arch.emulate_ctxt = ctxt; 8251 8252 return ctxt; 8253 } 8254 8255 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) 8256 { 8257 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8258 int cs_db, cs_l; 8259 8260 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); 8261 8262 ctxt->gpa_available = false; 8263 ctxt->eflags = kvm_get_rflags(vcpu); 8264 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; 8265 8266 ctxt->eip = kvm_rip_read(vcpu); 8267 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 8268 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : 8269 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : 8270 cs_db ? X86EMUL_MODE_PROT32 : 8271 X86EMUL_MODE_PROT16; 8272 BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); 8273 BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); 8274 BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); 8275 8276 ctxt->interruptibility = 0; 8277 ctxt->have_exception = false; 8278 ctxt->exception.vector = -1; 8279 ctxt->perm_ok = false; 8280 8281 init_decode_cache(ctxt); 8282 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 8283 } 8284 8285 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) 8286 { 8287 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8288 int ret; 8289 8290 init_emulate_ctxt(vcpu); 8291 8292 ctxt->op_bytes = 2; 8293 ctxt->ad_bytes = 2; 8294 ctxt->_eip = ctxt->eip + inc_eip; 8295 ret = emulate_int_real(ctxt, irq); 8296 8297 if (ret != X86EMUL_CONTINUE) { 8298 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 8299 } else { 8300 ctxt->eip = ctxt->_eip; 8301 kvm_rip_write(vcpu, ctxt->eip); 8302 kvm_set_rflags(vcpu, ctxt->eflags); 8303 } 8304 } 8305 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); 8306 8307 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, 8308 u8 ndata, u8 *insn_bytes, u8 insn_size) 8309 { 8310 struct kvm_run *run = vcpu->run; 8311 u64 info[5]; 8312 u8 info_start; 8313 8314 /* 8315 * Zero the whole array used to retrieve the exit info, as casting to 8316 * u32 for select entries will leave some chunks uninitialized. 8317 */ 8318 memset(&info, 0, sizeof(info)); 8319 8320 static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1], 8321 &info[2], (u32 *)&info[3], 8322 (u32 *)&info[4]); 8323 8324 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 8325 run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION; 8326 8327 /* 8328 * There's currently space for 13 entries, but 5 are used for the exit 8329 * reason and info. Restrict to 4 to reduce the maintenance burden 8330 * when expanding kvm_run.emulation_failure in the future. 8331 */ 8332 if (WARN_ON_ONCE(ndata > 4)) 8333 ndata = 4; 8334 8335 /* Always include the flags as a 'data' entry. */ 8336 info_start = 1; 8337 run->emulation_failure.flags = 0; 8338 8339 if (insn_size) { 8340 BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) + 8341 sizeof(run->emulation_failure.insn_bytes) != 16)); 8342 info_start += 2; 8343 run->emulation_failure.flags |= 8344 KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES; 8345 run->emulation_failure.insn_size = insn_size; 8346 memset(run->emulation_failure.insn_bytes, 0x90, 8347 sizeof(run->emulation_failure.insn_bytes)); 8348 memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size); 8349 } 8350 8351 memcpy(&run->internal.data[info_start], info, sizeof(info)); 8352 memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data, 8353 ndata * sizeof(data[0])); 8354 8355 run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata; 8356 } 8357 8358 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu) 8359 { 8360 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8361 8362 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, 8363 ctxt->fetch.end - ctxt->fetch.data); 8364 } 8365 8366 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, 8367 u8 ndata) 8368 { 8369 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0); 8370 } 8371 EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit); 8372 8373 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) 8374 { 8375 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0); 8376 } 8377 EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit); 8378 8379 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) 8380 { 8381 struct kvm *kvm = vcpu->kvm; 8382 8383 ++vcpu->stat.insn_emulation_fail; 8384 trace_kvm_emulate_insn_failed(vcpu); 8385 8386 if (emulation_type & EMULTYPE_VMWARE_GP) { 8387 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 8388 return 1; 8389 } 8390 8391 if (kvm->arch.exit_on_emulation_error || 8392 (emulation_type & EMULTYPE_SKIP)) { 8393 prepare_emulation_ctxt_failure_exit(vcpu); 8394 return 0; 8395 } 8396 8397 kvm_queue_exception(vcpu, UD_VECTOR); 8398 8399 if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { 8400 prepare_emulation_ctxt_failure_exit(vcpu); 8401 return 0; 8402 } 8403 8404 return 1; 8405 } 8406 8407 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 8408 bool write_fault_to_shadow_pgtable, 8409 int emulation_type) 8410 { 8411 gpa_t gpa = cr2_or_gpa; 8412 kvm_pfn_t pfn; 8413 8414 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 8415 return false; 8416 8417 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 8418 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 8419 return false; 8420 8421 if (!vcpu->arch.mmu->root_role.direct) { 8422 /* 8423 * Write permission should be allowed since only 8424 * write access need to be emulated. 8425 */ 8426 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 8427 8428 /* 8429 * If the mapping is invalid in guest, let cpu retry 8430 * it to generate fault. 8431 */ 8432 if (gpa == INVALID_GPA) 8433 return true; 8434 } 8435 8436 /* 8437 * Do not retry the unhandleable instruction if it faults on the 8438 * readonly host memory, otherwise it will goto a infinite loop: 8439 * retry instruction -> write #PF -> emulation fail -> retry 8440 * instruction -> ... 8441 */ 8442 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); 8443 8444 /* 8445 * If the instruction failed on the error pfn, it can not be fixed, 8446 * report the error to userspace. 8447 */ 8448 if (is_error_noslot_pfn(pfn)) 8449 return false; 8450 8451 kvm_release_pfn_clean(pfn); 8452 8453 /* The instructions are well-emulated on direct mmu. */ 8454 if (vcpu->arch.mmu->root_role.direct) { 8455 unsigned int indirect_shadow_pages; 8456 8457 write_lock(&vcpu->kvm->mmu_lock); 8458 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; 8459 write_unlock(&vcpu->kvm->mmu_lock); 8460 8461 if (indirect_shadow_pages) 8462 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8463 8464 return true; 8465 } 8466 8467 /* 8468 * if emulation was due to access to shadowed page table 8469 * and it failed try to unshadow page and re-enter the 8470 * guest to let CPU execute the instruction. 8471 */ 8472 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8473 8474 /* 8475 * If the access faults on its page table, it can not 8476 * be fixed by unprotecting shadow page and it should 8477 * be reported to userspace. 8478 */ 8479 return !write_fault_to_shadow_pgtable; 8480 } 8481 8482 static bool retry_instruction(struct x86_emulate_ctxt *ctxt, 8483 gpa_t cr2_or_gpa, int emulation_type) 8484 { 8485 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8486 unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa; 8487 8488 last_retry_eip = vcpu->arch.last_retry_eip; 8489 last_retry_addr = vcpu->arch.last_retry_addr; 8490 8491 /* 8492 * If the emulation is caused by #PF and it is non-page_table 8493 * writing instruction, it means the VM-EXIT is caused by shadow 8494 * page protected, we can zap the shadow page and retry this 8495 * instruction directly. 8496 * 8497 * Note: if the guest uses a non-page-table modifying instruction 8498 * on the PDE that points to the instruction, then we will unmap 8499 * the instruction and go to an infinite loop. So, we cache the 8500 * last retried eip and the last fault address, if we meet the eip 8501 * and the address again, we can break out of the potential infinite 8502 * loop. 8503 */ 8504 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 8505 8506 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 8507 return false; 8508 8509 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 8510 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 8511 return false; 8512 8513 if (x86_page_table_writing_insn(ctxt)) 8514 return false; 8515 8516 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) 8517 return false; 8518 8519 vcpu->arch.last_retry_eip = ctxt->eip; 8520 vcpu->arch.last_retry_addr = cr2_or_gpa; 8521 8522 if (!vcpu->arch.mmu->root_role.direct) 8523 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 8524 8525 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8526 8527 return true; 8528 } 8529 8530 static int complete_emulated_mmio(struct kvm_vcpu *vcpu); 8531 static int complete_emulated_pio(struct kvm_vcpu *vcpu); 8532 8533 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, 8534 unsigned long *db) 8535 { 8536 u32 dr6 = 0; 8537 int i; 8538 u32 enable, rwlen; 8539 8540 enable = dr7; 8541 rwlen = dr7 >> 16; 8542 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) 8543 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) 8544 dr6 |= (1 << i); 8545 return dr6; 8546 } 8547 8548 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) 8549 { 8550 struct kvm_run *kvm_run = vcpu->run; 8551 8552 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 8553 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; 8554 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); 8555 kvm_run->debug.arch.exception = DB_VECTOR; 8556 kvm_run->exit_reason = KVM_EXIT_DEBUG; 8557 return 0; 8558 } 8559 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); 8560 return 1; 8561 } 8562 8563 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) 8564 { 8565 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 8566 int r; 8567 8568 r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); 8569 if (unlikely(!r)) 8570 return 0; 8571 8572 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 8573 8574 /* 8575 * rflags is the old, "raw" value of the flags. The new value has 8576 * not been saved yet. 8577 * 8578 * This is correct even for TF set by the guest, because "the 8579 * processor will not generate this exception after the instruction 8580 * that sets the TF flag". 8581 */ 8582 if (unlikely(rflags & X86_EFLAGS_TF)) 8583 r = kvm_vcpu_do_singlestep(vcpu); 8584 return r; 8585 } 8586 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); 8587 8588 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu) 8589 { 8590 u32 shadow; 8591 8592 if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF) 8593 return true; 8594 8595 /* 8596 * Intel CPUs inhibit code #DBs when MOV/POP SS blocking is active, 8597 * but AMD CPUs do not. MOV/POP SS blocking is rare, check that first 8598 * to avoid the relatively expensive CPUID lookup. 8599 */ 8600 shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 8601 return (shadow & KVM_X86_SHADOW_INT_MOV_SS) && 8602 guest_cpuid_is_intel(vcpu); 8603 } 8604 8605 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, 8606 int emulation_type, int *r) 8607 { 8608 WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE); 8609 8610 /* 8611 * Do not check for code breakpoints if hardware has already done the 8612 * checks, as inferred from the emulation type. On NO_DECODE and SKIP, 8613 * the instruction has passed all exception checks, and all intercepted 8614 * exceptions that trigger emulation have lower priority than code 8615 * breakpoints, i.e. the fact that the intercepted exception occurred 8616 * means any code breakpoints have already been serviced. 8617 * 8618 * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as 8619 * hardware has checked the RIP of the magic prefix, but not the RIP of 8620 * the instruction being emulated. The intent of forced emulation is 8621 * to behave as if KVM intercepted the instruction without an exception 8622 * and without a prefix. 8623 */ 8624 if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP | 8625 EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF)) 8626 return false; 8627 8628 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && 8629 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { 8630 struct kvm_run *kvm_run = vcpu->run; 8631 unsigned long eip = kvm_get_linear_rip(vcpu); 8632 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 8633 vcpu->arch.guest_debug_dr7, 8634 vcpu->arch.eff_db); 8635 8636 if (dr6 != 0) { 8637 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; 8638 kvm_run->debug.arch.pc = eip; 8639 kvm_run->debug.arch.exception = DB_VECTOR; 8640 kvm_run->exit_reason = KVM_EXIT_DEBUG; 8641 *r = 0; 8642 return true; 8643 } 8644 } 8645 8646 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && 8647 !kvm_is_code_breakpoint_inhibited(vcpu)) { 8648 unsigned long eip = kvm_get_linear_rip(vcpu); 8649 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 8650 vcpu->arch.dr7, 8651 vcpu->arch.db); 8652 8653 if (dr6 != 0) { 8654 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); 8655 *r = 1; 8656 return true; 8657 } 8658 } 8659 8660 return false; 8661 } 8662 8663 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) 8664 { 8665 switch (ctxt->opcode_len) { 8666 case 1: 8667 switch (ctxt->b) { 8668 case 0xe4: /* IN */ 8669 case 0xe5: 8670 case 0xec: 8671 case 0xed: 8672 case 0xe6: /* OUT */ 8673 case 0xe7: 8674 case 0xee: 8675 case 0xef: 8676 case 0x6c: /* INS */ 8677 case 0x6d: 8678 case 0x6e: /* OUTS */ 8679 case 0x6f: 8680 return true; 8681 } 8682 break; 8683 case 2: 8684 switch (ctxt->b) { 8685 case 0x33: /* RDPMC */ 8686 return true; 8687 } 8688 break; 8689 } 8690 8691 return false; 8692 } 8693 8694 /* 8695 * Decode an instruction for emulation. The caller is responsible for handling 8696 * code breakpoints. Note, manually detecting code breakpoints is unnecessary 8697 * (and wrong) when emulating on an intercepted fault-like exception[*], as 8698 * code breakpoints have higher priority and thus have already been done by 8699 * hardware. 8700 * 8701 * [*] Except #MC, which is higher priority, but KVM should never emulate in 8702 * response to a machine check. 8703 */ 8704 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, 8705 void *insn, int insn_len) 8706 { 8707 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8708 int r; 8709 8710 init_emulate_ctxt(vcpu); 8711 8712 r = x86_decode_insn(ctxt, insn, insn_len, emulation_type); 8713 8714 trace_kvm_emulate_insn_start(vcpu); 8715 ++vcpu->stat.insn_emulation; 8716 8717 return r; 8718 } 8719 EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction); 8720 8721 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 8722 int emulation_type, void *insn, int insn_len) 8723 { 8724 int r; 8725 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8726 bool writeback = true; 8727 bool write_fault_to_spt; 8728 8729 if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len))) 8730 return 1; 8731 8732 vcpu->arch.l1tf_flush_l1d = true; 8733 8734 /* 8735 * Clear write_fault_to_shadow_pgtable here to ensure it is 8736 * never reused. 8737 */ 8738 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; 8739 vcpu->arch.write_fault_to_shadow_pgtable = false; 8740 8741 if (!(emulation_type & EMULTYPE_NO_DECODE)) { 8742 kvm_clear_exception_queue(vcpu); 8743 8744 /* 8745 * Return immediately if RIP hits a code breakpoint, such #DBs 8746 * are fault-like and are higher priority than any faults on 8747 * the code fetch itself. 8748 */ 8749 if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r)) 8750 return r; 8751 8752 r = x86_decode_emulated_instruction(vcpu, emulation_type, 8753 insn, insn_len); 8754 if (r != EMULATION_OK) { 8755 if ((emulation_type & EMULTYPE_TRAP_UD) || 8756 (emulation_type & EMULTYPE_TRAP_UD_FORCED)) { 8757 kvm_queue_exception(vcpu, UD_VECTOR); 8758 return 1; 8759 } 8760 if (reexecute_instruction(vcpu, cr2_or_gpa, 8761 write_fault_to_spt, 8762 emulation_type)) 8763 return 1; 8764 if (ctxt->have_exception) { 8765 /* 8766 * #UD should result in just EMULATION_FAILED, and trap-like 8767 * exception should not be encountered during decode. 8768 */ 8769 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || 8770 exception_type(ctxt->exception.vector) == EXCPT_TRAP); 8771 inject_emulated_exception(vcpu); 8772 return 1; 8773 } 8774 return handle_emulation_failure(vcpu, emulation_type); 8775 } 8776 } 8777 8778 if ((emulation_type & EMULTYPE_VMWARE_GP) && 8779 !is_vmware_backdoor_opcode(ctxt)) { 8780 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 8781 return 1; 8782 } 8783 8784 /* 8785 * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for 8786 * use *only* by vendor callbacks for kvm_skip_emulated_instruction(). 8787 * The caller is responsible for updating interruptibility state and 8788 * injecting single-step #DBs. 8789 */ 8790 if (emulation_type & EMULTYPE_SKIP) { 8791 if (ctxt->mode != X86EMUL_MODE_PROT64) 8792 ctxt->eip = (u32)ctxt->_eip; 8793 else 8794 ctxt->eip = ctxt->_eip; 8795 8796 if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) { 8797 r = 1; 8798 goto writeback; 8799 } 8800 8801 kvm_rip_write(vcpu, ctxt->eip); 8802 if (ctxt->eflags & X86_EFLAGS_RF) 8803 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); 8804 return 1; 8805 } 8806 8807 if (retry_instruction(ctxt, cr2_or_gpa, emulation_type)) 8808 return 1; 8809 8810 /* this is needed for vmware backdoor interface to work since it 8811 changes registers values during IO operation */ 8812 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { 8813 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 8814 emulator_invalidate_register_cache(ctxt); 8815 } 8816 8817 restart: 8818 if (emulation_type & EMULTYPE_PF) { 8819 /* Save the faulting GPA (cr2) in the address field */ 8820 ctxt->exception.address = cr2_or_gpa; 8821 8822 /* With shadow page tables, cr2 contains a GVA or nGPA. */ 8823 if (vcpu->arch.mmu->root_role.direct) { 8824 ctxt->gpa_available = true; 8825 ctxt->gpa_val = cr2_or_gpa; 8826 } 8827 } else { 8828 /* Sanitize the address out of an abundance of paranoia. */ 8829 ctxt->exception.address = 0; 8830 } 8831 8832 r = x86_emulate_insn(ctxt); 8833 8834 if (r == EMULATION_INTERCEPTED) 8835 return 1; 8836 8837 if (r == EMULATION_FAILED) { 8838 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, 8839 emulation_type)) 8840 return 1; 8841 8842 return handle_emulation_failure(vcpu, emulation_type); 8843 } 8844 8845 if (ctxt->have_exception) { 8846 r = 1; 8847 inject_emulated_exception(vcpu); 8848 } else if (vcpu->arch.pio.count) { 8849 if (!vcpu->arch.pio.in) { 8850 /* FIXME: return into emulator if single-stepping. */ 8851 vcpu->arch.pio.count = 0; 8852 } else { 8853 writeback = false; 8854 vcpu->arch.complete_userspace_io = complete_emulated_pio; 8855 } 8856 r = 0; 8857 } else if (vcpu->mmio_needed) { 8858 ++vcpu->stat.mmio_exits; 8859 8860 if (!vcpu->mmio_is_write) 8861 writeback = false; 8862 r = 0; 8863 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 8864 } else if (vcpu->arch.complete_userspace_io) { 8865 writeback = false; 8866 r = 0; 8867 } else if (r == EMULATION_RESTART) 8868 goto restart; 8869 else 8870 r = 1; 8871 8872 writeback: 8873 if (writeback) { 8874 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 8875 toggle_interruptibility(vcpu, ctxt->interruptibility); 8876 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 8877 8878 /* 8879 * Note, EXCPT_DB is assumed to be fault-like as the emulator 8880 * only supports code breakpoints and general detect #DB, both 8881 * of which are fault-like. 8882 */ 8883 if (!ctxt->have_exception || 8884 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { 8885 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 8886 if (ctxt->is_branch) 8887 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 8888 kvm_rip_write(vcpu, ctxt->eip); 8889 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) 8890 r = kvm_vcpu_do_singlestep(vcpu); 8891 static_call_cond(kvm_x86_update_emulated_instruction)(vcpu); 8892 __kvm_set_rflags(vcpu, ctxt->eflags); 8893 } 8894 8895 /* 8896 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will 8897 * do nothing, and it will be requested again as soon as 8898 * the shadow expires. But we still need to check here, 8899 * because POPF has no interrupt shadow. 8900 */ 8901 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) 8902 kvm_make_request(KVM_REQ_EVENT, vcpu); 8903 } else 8904 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; 8905 8906 return r; 8907 } 8908 8909 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) 8910 { 8911 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); 8912 } 8913 EXPORT_SYMBOL_GPL(kvm_emulate_instruction); 8914 8915 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, 8916 void *insn, int insn_len) 8917 { 8918 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); 8919 } 8920 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); 8921 8922 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) 8923 { 8924 vcpu->arch.pio.count = 0; 8925 return 1; 8926 } 8927 8928 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) 8929 { 8930 vcpu->arch.pio.count = 0; 8931 8932 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) 8933 return 1; 8934 8935 return kvm_skip_emulated_instruction(vcpu); 8936 } 8937 8938 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, 8939 unsigned short port) 8940 { 8941 unsigned long val = kvm_rax_read(vcpu); 8942 int ret = emulator_pio_out(vcpu, size, port, &val, 1); 8943 8944 if (ret) 8945 return ret; 8946 8947 /* 8948 * Workaround userspace that relies on old KVM behavior of %rip being 8949 * incremented prior to exiting to userspace to handle "OUT 0x7e". 8950 */ 8951 if (port == 0x7e && 8952 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { 8953 vcpu->arch.complete_userspace_io = 8954 complete_fast_pio_out_port_0x7e; 8955 kvm_skip_emulated_instruction(vcpu); 8956 } else { 8957 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 8958 vcpu->arch.complete_userspace_io = complete_fast_pio_out; 8959 } 8960 return 0; 8961 } 8962 8963 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) 8964 { 8965 unsigned long val; 8966 8967 /* We should only ever be called with arch.pio.count equal to 1 */ 8968 BUG_ON(vcpu->arch.pio.count != 1); 8969 8970 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { 8971 vcpu->arch.pio.count = 0; 8972 return 1; 8973 } 8974 8975 /* For size less than 4 we merge, else we zero extend */ 8976 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; 8977 8978 complete_emulator_pio_in(vcpu, &val); 8979 kvm_rax_write(vcpu, val); 8980 8981 return kvm_skip_emulated_instruction(vcpu); 8982 } 8983 8984 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, 8985 unsigned short port) 8986 { 8987 unsigned long val; 8988 int ret; 8989 8990 /* For size less than 4 we merge, else we zero extend */ 8991 val = (size < 4) ? kvm_rax_read(vcpu) : 0; 8992 8993 ret = emulator_pio_in(vcpu, size, port, &val, 1); 8994 if (ret) { 8995 kvm_rax_write(vcpu, val); 8996 return ret; 8997 } 8998 8999 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 9000 vcpu->arch.complete_userspace_io = complete_fast_pio_in; 9001 9002 return 0; 9003 } 9004 9005 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) 9006 { 9007 int ret; 9008 9009 if (in) 9010 ret = kvm_fast_pio_in(vcpu, size, port); 9011 else 9012 ret = kvm_fast_pio_out(vcpu, size, port); 9013 return ret && kvm_skip_emulated_instruction(vcpu); 9014 } 9015 EXPORT_SYMBOL_GPL(kvm_fast_pio); 9016 9017 static int kvmclock_cpu_down_prep(unsigned int cpu) 9018 { 9019 __this_cpu_write(cpu_tsc_khz, 0); 9020 return 0; 9021 } 9022 9023 static void tsc_khz_changed(void *data) 9024 { 9025 struct cpufreq_freqs *freq = data; 9026 unsigned long khz = 0; 9027 9028 if (data) 9029 khz = freq->new; 9030 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 9031 khz = cpufreq_quick_get(raw_smp_processor_id()); 9032 if (!khz) 9033 khz = tsc_khz; 9034 __this_cpu_write(cpu_tsc_khz, khz); 9035 } 9036 9037 #ifdef CONFIG_X86_64 9038 static void kvm_hyperv_tsc_notifier(void) 9039 { 9040 struct kvm *kvm; 9041 int cpu; 9042 9043 mutex_lock(&kvm_lock); 9044 list_for_each_entry(kvm, &vm_list, vm_list) 9045 kvm_make_mclock_inprogress_request(kvm); 9046 9047 /* no guest entries from this point */ 9048 hyperv_stop_tsc_emulation(); 9049 9050 /* TSC frequency always matches when on Hyper-V */ 9051 for_each_present_cpu(cpu) 9052 per_cpu(cpu_tsc_khz, cpu) = tsc_khz; 9053 kvm_caps.max_guest_tsc_khz = tsc_khz; 9054 9055 list_for_each_entry(kvm, &vm_list, vm_list) { 9056 __kvm_start_pvclock_update(kvm); 9057 pvclock_update_vm_gtod_copy(kvm); 9058 kvm_end_pvclock_update(kvm); 9059 } 9060 9061 mutex_unlock(&kvm_lock); 9062 } 9063 #endif 9064 9065 static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu) 9066 { 9067 struct kvm *kvm; 9068 struct kvm_vcpu *vcpu; 9069 int send_ipi = 0; 9070 unsigned long i; 9071 9072 /* 9073 * We allow guests to temporarily run on slowing clocks, 9074 * provided we notify them after, or to run on accelerating 9075 * clocks, provided we notify them before. Thus time never 9076 * goes backwards. 9077 * 9078 * However, we have a problem. We can't atomically update 9079 * the frequency of a given CPU from this function; it is 9080 * merely a notifier, which can be called from any CPU. 9081 * Changing the TSC frequency at arbitrary points in time 9082 * requires a recomputation of local variables related to 9083 * the TSC for each VCPU. We must flag these local variables 9084 * to be updated and be sure the update takes place with the 9085 * new frequency before any guests proceed. 9086 * 9087 * Unfortunately, the combination of hotplug CPU and frequency 9088 * change creates an intractable locking scenario; the order 9089 * of when these callouts happen is undefined with respect to 9090 * CPU hotplug, and they can race with each other. As such, 9091 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is 9092 * undefined; you can actually have a CPU frequency change take 9093 * place in between the computation of X and the setting of the 9094 * variable. To protect against this problem, all updates of 9095 * the per_cpu tsc_khz variable are done in an interrupt 9096 * protected IPI, and all callers wishing to update the value 9097 * must wait for a synchronous IPI to complete (which is trivial 9098 * if the caller is on the CPU already). This establishes the 9099 * necessary total order on variable updates. 9100 * 9101 * Note that because a guest time update may take place 9102 * anytime after the setting of the VCPU's request bit, the 9103 * correct TSC value must be set before the request. However, 9104 * to ensure the update actually makes it to any guest which 9105 * starts running in hardware virtualization between the set 9106 * and the acquisition of the spinlock, we must also ping the 9107 * CPU after setting the request bit. 9108 * 9109 */ 9110 9111 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 9112 9113 mutex_lock(&kvm_lock); 9114 list_for_each_entry(kvm, &vm_list, vm_list) { 9115 kvm_for_each_vcpu(i, vcpu, kvm) { 9116 if (vcpu->cpu != cpu) 9117 continue; 9118 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 9119 if (vcpu->cpu != raw_smp_processor_id()) 9120 send_ipi = 1; 9121 } 9122 } 9123 mutex_unlock(&kvm_lock); 9124 9125 if (freq->old < freq->new && send_ipi) { 9126 /* 9127 * We upscale the frequency. Must make the guest 9128 * doesn't see old kvmclock values while running with 9129 * the new frequency, otherwise we risk the guest sees 9130 * time go backwards. 9131 * 9132 * In case we update the frequency for another cpu 9133 * (which might be in guest context) send an interrupt 9134 * to kick the cpu out of guest context. Next time 9135 * guest context is entered kvmclock will be updated, 9136 * so the guest will not see stale values. 9137 */ 9138 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 9139 } 9140 } 9141 9142 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 9143 void *data) 9144 { 9145 struct cpufreq_freqs *freq = data; 9146 int cpu; 9147 9148 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) 9149 return 0; 9150 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) 9151 return 0; 9152 9153 for_each_cpu(cpu, freq->policy->cpus) 9154 __kvmclock_cpufreq_notifier(freq, cpu); 9155 9156 return 0; 9157 } 9158 9159 static struct notifier_block kvmclock_cpufreq_notifier_block = { 9160 .notifier_call = kvmclock_cpufreq_notifier 9161 }; 9162 9163 static int kvmclock_cpu_online(unsigned int cpu) 9164 { 9165 tsc_khz_changed(NULL); 9166 return 0; 9167 } 9168 9169 static void kvm_timer_init(void) 9170 { 9171 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9172 max_tsc_khz = tsc_khz; 9173 9174 if (IS_ENABLED(CONFIG_CPU_FREQ)) { 9175 struct cpufreq_policy *policy; 9176 int cpu; 9177 9178 cpu = get_cpu(); 9179 policy = cpufreq_cpu_get(cpu); 9180 if (policy) { 9181 if (policy->cpuinfo.max_freq) 9182 max_tsc_khz = policy->cpuinfo.max_freq; 9183 cpufreq_cpu_put(policy); 9184 } 9185 put_cpu(); 9186 } 9187 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 9188 CPUFREQ_TRANSITION_NOTIFIER); 9189 } 9190 9191 cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online", 9192 kvmclock_cpu_online, kvmclock_cpu_down_prep); 9193 } 9194 9195 #ifdef CONFIG_X86_64 9196 static void pvclock_gtod_update_fn(struct work_struct *work) 9197 { 9198 struct kvm *kvm; 9199 struct kvm_vcpu *vcpu; 9200 unsigned long i; 9201 9202 mutex_lock(&kvm_lock); 9203 list_for_each_entry(kvm, &vm_list, vm_list) 9204 kvm_for_each_vcpu(i, vcpu, kvm) 9205 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 9206 atomic_set(&kvm_guest_has_master_clock, 0); 9207 mutex_unlock(&kvm_lock); 9208 } 9209 9210 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); 9211 9212 /* 9213 * Indirection to move queue_work() out of the tk_core.seq write held 9214 * region to prevent possible deadlocks against time accessors which 9215 * are invoked with work related locks held. 9216 */ 9217 static void pvclock_irq_work_fn(struct irq_work *w) 9218 { 9219 queue_work(system_long_wq, &pvclock_gtod_work); 9220 } 9221 9222 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn); 9223 9224 /* 9225 * Notification about pvclock gtod data update. 9226 */ 9227 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, 9228 void *priv) 9229 { 9230 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 9231 struct timekeeper *tk = priv; 9232 9233 update_pvclock_gtod(tk); 9234 9235 /* 9236 * Disable master clock if host does not trust, or does not use, 9237 * TSC based clocksource. Delegate queue_work() to irq_work as 9238 * this is invoked with tk_core.seq write held. 9239 */ 9240 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && 9241 atomic_read(&kvm_guest_has_master_clock) != 0) 9242 irq_work_queue(&pvclock_irq_work); 9243 return 0; 9244 } 9245 9246 static struct notifier_block pvclock_gtod_notifier = { 9247 .notifier_call = pvclock_gtod_notify, 9248 }; 9249 #endif 9250 9251 int kvm_arch_init(void *opaque) 9252 { 9253 struct kvm_x86_init_ops *ops = opaque; 9254 u64 host_pat; 9255 int r; 9256 9257 if (kvm_x86_ops.hardware_enable) { 9258 pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name); 9259 return -EEXIST; 9260 } 9261 9262 if (!ops->cpu_has_kvm_support()) { 9263 pr_err_ratelimited("kvm: no hardware support for '%s'\n", 9264 ops->runtime_ops->name); 9265 return -EOPNOTSUPP; 9266 } 9267 if (ops->disabled_by_bios()) { 9268 pr_err_ratelimited("kvm: support for '%s' disabled by bios\n", 9269 ops->runtime_ops->name); 9270 return -EOPNOTSUPP; 9271 } 9272 9273 /* 9274 * KVM explicitly assumes that the guest has an FPU and 9275 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the 9276 * vCPU's FPU state as a fxregs_state struct. 9277 */ 9278 if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) { 9279 printk(KERN_ERR "kvm: inadequate fpu\n"); 9280 return -EOPNOTSUPP; 9281 } 9282 9283 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9284 pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n"); 9285 return -EOPNOTSUPP; 9286 } 9287 9288 /* 9289 * KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes 9290 * the PAT bits in SPTEs. Bail if PAT[0] is programmed to something 9291 * other than WB. Note, EPT doesn't utilize the PAT, but don't bother 9292 * with an exception. PAT[0] is set to WB on RESET and also by the 9293 * kernel, i.e. failure indicates a kernel bug or broken firmware. 9294 */ 9295 if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) || 9296 (host_pat & GENMASK(2, 0)) != 6) { 9297 pr_err("kvm: host PAT[0] is not WB\n"); 9298 return -EIO; 9299 } 9300 9301 x86_emulator_cache = kvm_alloc_emulator_cache(); 9302 if (!x86_emulator_cache) { 9303 pr_err("kvm: failed to allocate cache for x86 emulator\n"); 9304 return -ENOMEM; 9305 } 9306 9307 user_return_msrs = alloc_percpu(struct kvm_user_return_msrs); 9308 if (!user_return_msrs) { 9309 printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n"); 9310 r = -ENOMEM; 9311 goto out_free_x86_emulator_cache; 9312 } 9313 kvm_nr_uret_msrs = 0; 9314 9315 r = kvm_mmu_vendor_module_init(); 9316 if (r) 9317 goto out_free_percpu; 9318 9319 kvm_timer_init(); 9320 9321 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 9322 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 9323 kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0; 9324 } 9325 9326 if (pi_inject_timer == -1) 9327 pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER); 9328 #ifdef CONFIG_X86_64 9329 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); 9330 9331 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9332 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier); 9333 #endif 9334 9335 return 0; 9336 9337 out_free_percpu: 9338 free_percpu(user_return_msrs); 9339 out_free_x86_emulator_cache: 9340 kmem_cache_destroy(x86_emulator_cache); 9341 return r; 9342 } 9343 9344 void kvm_arch_exit(void) 9345 { 9346 #ifdef CONFIG_X86_64 9347 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9348 clear_hv_tscchange_cb(); 9349 #endif 9350 kvm_lapic_exit(); 9351 9352 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 9353 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, 9354 CPUFREQ_TRANSITION_NOTIFIER); 9355 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); 9356 #ifdef CONFIG_X86_64 9357 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); 9358 irq_work_sync(&pvclock_irq_work); 9359 cancel_work_sync(&pvclock_gtod_work); 9360 #endif 9361 kvm_x86_ops.hardware_enable = NULL; 9362 kvm_mmu_vendor_module_exit(); 9363 free_percpu(user_return_msrs); 9364 kmem_cache_destroy(x86_emulator_cache); 9365 #ifdef CONFIG_KVM_XEN 9366 static_key_deferred_flush(&kvm_xen_enabled); 9367 WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); 9368 #endif 9369 } 9370 9371 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) 9372 { 9373 /* 9374 * The vCPU has halted, e.g. executed HLT. Update the run state if the 9375 * local APIC is in-kernel, the run loop will detect the non-runnable 9376 * state and halt the vCPU. Exit to userspace if the local APIC is 9377 * managed by userspace, in which case userspace is responsible for 9378 * handling wake events. 9379 */ 9380 ++vcpu->stat.halt_exits; 9381 if (lapic_in_kernel(vcpu)) { 9382 vcpu->arch.mp_state = state; 9383 return 1; 9384 } else { 9385 vcpu->run->exit_reason = reason; 9386 return 0; 9387 } 9388 } 9389 9390 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu) 9391 { 9392 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); 9393 } 9394 EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip); 9395 9396 int kvm_emulate_halt(struct kvm_vcpu *vcpu) 9397 { 9398 int ret = kvm_skip_emulated_instruction(vcpu); 9399 /* 9400 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered 9401 * KVM_EXIT_DEBUG here. 9402 */ 9403 return kvm_emulate_halt_noskip(vcpu) && ret; 9404 } 9405 EXPORT_SYMBOL_GPL(kvm_emulate_halt); 9406 9407 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) 9408 { 9409 int ret = kvm_skip_emulated_instruction(vcpu); 9410 9411 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, 9412 KVM_EXIT_AP_RESET_HOLD) && ret; 9413 } 9414 EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold); 9415 9416 #ifdef CONFIG_X86_64 9417 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, 9418 unsigned long clock_type) 9419 { 9420 struct kvm_clock_pairing clock_pairing; 9421 struct timespec64 ts; 9422 u64 cycle; 9423 int ret; 9424 9425 if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK) 9426 return -KVM_EOPNOTSUPP; 9427 9428 /* 9429 * When tsc is in permanent catchup mode guests won't be able to use 9430 * pvclock_read_retry loop to get consistent view of pvclock 9431 */ 9432 if (vcpu->arch.tsc_always_catchup) 9433 return -KVM_EOPNOTSUPP; 9434 9435 if (!kvm_get_walltime_and_clockread(&ts, &cycle)) 9436 return -KVM_EOPNOTSUPP; 9437 9438 clock_pairing.sec = ts.tv_sec; 9439 clock_pairing.nsec = ts.tv_nsec; 9440 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); 9441 clock_pairing.flags = 0; 9442 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad)); 9443 9444 ret = 0; 9445 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, 9446 sizeof(struct kvm_clock_pairing))) 9447 ret = -KVM_EFAULT; 9448 9449 return ret; 9450 } 9451 #endif 9452 9453 /* 9454 * kvm_pv_kick_cpu_op: Kick a vcpu. 9455 * 9456 * @apicid - apicid of vcpu to be kicked. 9457 */ 9458 static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid) 9459 { 9460 /* 9461 * All other fields are unused for APIC_DM_REMRD, but may be consumed by 9462 * common code, e.g. for tracing. Defer initialization to the compiler. 9463 */ 9464 struct kvm_lapic_irq lapic_irq = { 9465 .delivery_mode = APIC_DM_REMRD, 9466 .dest_mode = APIC_DEST_PHYSICAL, 9467 .shorthand = APIC_DEST_NOSHORT, 9468 .dest_id = apicid, 9469 }; 9470 9471 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); 9472 } 9473 9474 bool kvm_apicv_activated(struct kvm *kvm) 9475 { 9476 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); 9477 } 9478 EXPORT_SYMBOL_GPL(kvm_apicv_activated); 9479 9480 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu) 9481 { 9482 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons); 9483 ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu); 9484 9485 return (vm_reasons | vcpu_reasons) == 0; 9486 } 9487 EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated); 9488 9489 static void set_or_clear_apicv_inhibit(unsigned long *inhibits, 9490 enum kvm_apicv_inhibit reason, bool set) 9491 { 9492 if (set) 9493 __set_bit(reason, inhibits); 9494 else 9495 __clear_bit(reason, inhibits); 9496 9497 trace_kvm_apicv_inhibit_changed(reason, set, *inhibits); 9498 } 9499 9500 static void kvm_apicv_init(struct kvm *kvm) 9501 { 9502 unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons; 9503 9504 init_rwsem(&kvm->arch.apicv_update_lock); 9505 9506 set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true); 9507 9508 if (!enable_apicv) 9509 set_or_clear_apicv_inhibit(inhibits, 9510 APICV_INHIBIT_REASON_DISABLE, true); 9511 } 9512 9513 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) 9514 { 9515 struct kvm_vcpu *target = NULL; 9516 struct kvm_apic_map *map; 9517 9518 vcpu->stat.directed_yield_attempted++; 9519 9520 if (single_task_running()) 9521 goto no_yield; 9522 9523 rcu_read_lock(); 9524 map = rcu_dereference(vcpu->kvm->arch.apic_map); 9525 9526 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) 9527 target = map->phys_map[dest_id]->vcpu; 9528 9529 rcu_read_unlock(); 9530 9531 if (!target || !READ_ONCE(target->ready)) 9532 goto no_yield; 9533 9534 /* Ignore requests to yield to self */ 9535 if (vcpu == target) 9536 goto no_yield; 9537 9538 if (kvm_vcpu_yield_to(target) <= 0) 9539 goto no_yield; 9540 9541 vcpu->stat.directed_yield_successful++; 9542 9543 no_yield: 9544 return; 9545 } 9546 9547 static int complete_hypercall_exit(struct kvm_vcpu *vcpu) 9548 { 9549 u64 ret = vcpu->run->hypercall.ret; 9550 9551 if (!is_64_bit_mode(vcpu)) 9552 ret = (u32)ret; 9553 kvm_rax_write(vcpu, ret); 9554 ++vcpu->stat.hypercalls; 9555 return kvm_skip_emulated_instruction(vcpu); 9556 } 9557 9558 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 9559 { 9560 unsigned long nr, a0, a1, a2, a3, ret; 9561 int op_64_bit; 9562 9563 if (kvm_xen_hypercall_enabled(vcpu->kvm)) 9564 return kvm_xen_hypercall(vcpu); 9565 9566 if (kvm_hv_hypercall_enabled(vcpu)) 9567 return kvm_hv_hypercall(vcpu); 9568 9569 nr = kvm_rax_read(vcpu); 9570 a0 = kvm_rbx_read(vcpu); 9571 a1 = kvm_rcx_read(vcpu); 9572 a2 = kvm_rdx_read(vcpu); 9573 a3 = kvm_rsi_read(vcpu); 9574 9575 trace_kvm_hypercall(nr, a0, a1, a2, a3); 9576 9577 op_64_bit = is_64_bit_hypercall(vcpu); 9578 if (!op_64_bit) { 9579 nr &= 0xFFFFFFFF; 9580 a0 &= 0xFFFFFFFF; 9581 a1 &= 0xFFFFFFFF; 9582 a2 &= 0xFFFFFFFF; 9583 a3 &= 0xFFFFFFFF; 9584 } 9585 9586 if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { 9587 ret = -KVM_EPERM; 9588 goto out; 9589 } 9590 9591 ret = -KVM_ENOSYS; 9592 9593 switch (nr) { 9594 case KVM_HC_VAPIC_POLL_IRQ: 9595 ret = 0; 9596 break; 9597 case KVM_HC_KICK_CPU: 9598 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT)) 9599 break; 9600 9601 kvm_pv_kick_cpu_op(vcpu->kvm, a1); 9602 kvm_sched_yield(vcpu, a1); 9603 ret = 0; 9604 break; 9605 #ifdef CONFIG_X86_64 9606 case KVM_HC_CLOCK_PAIRING: 9607 ret = kvm_pv_clock_pairing(vcpu, a0, a1); 9608 break; 9609 #endif 9610 case KVM_HC_SEND_IPI: 9611 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI)) 9612 break; 9613 9614 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); 9615 break; 9616 case KVM_HC_SCHED_YIELD: 9617 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) 9618 break; 9619 9620 kvm_sched_yield(vcpu, a0); 9621 ret = 0; 9622 break; 9623 case KVM_HC_MAP_GPA_RANGE: { 9624 u64 gpa = a0, npages = a1, attrs = a2; 9625 9626 ret = -KVM_ENOSYS; 9627 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) 9628 break; 9629 9630 if (!PAGE_ALIGNED(gpa) || !npages || 9631 gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) { 9632 ret = -KVM_EINVAL; 9633 break; 9634 } 9635 9636 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; 9637 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; 9638 vcpu->run->hypercall.args[0] = gpa; 9639 vcpu->run->hypercall.args[1] = npages; 9640 vcpu->run->hypercall.args[2] = attrs; 9641 vcpu->run->hypercall.longmode = op_64_bit; 9642 vcpu->arch.complete_userspace_io = complete_hypercall_exit; 9643 return 0; 9644 } 9645 default: 9646 ret = -KVM_ENOSYS; 9647 break; 9648 } 9649 out: 9650 if (!op_64_bit) 9651 ret = (u32)ret; 9652 kvm_rax_write(vcpu, ret); 9653 9654 ++vcpu->stat.hypercalls; 9655 return kvm_skip_emulated_instruction(vcpu); 9656 } 9657 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 9658 9659 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) 9660 { 9661 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 9662 char instruction[3]; 9663 unsigned long rip = kvm_rip_read(vcpu); 9664 9665 /* 9666 * If the quirk is disabled, synthesize a #UD and let the guest pick up 9667 * the pieces. 9668 */ 9669 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { 9670 ctxt->exception.error_code_valid = false; 9671 ctxt->exception.vector = UD_VECTOR; 9672 ctxt->have_exception = true; 9673 return X86EMUL_PROPAGATE_FAULT; 9674 } 9675 9676 static_call(kvm_x86_patch_hypercall)(vcpu, instruction); 9677 9678 return emulator_write_emulated(ctxt, rip, instruction, 3, 9679 &ctxt->exception); 9680 } 9681 9682 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 9683 { 9684 return vcpu->run->request_interrupt_window && 9685 likely(!pic_in_kernel(vcpu->kvm)); 9686 } 9687 9688 /* Called within kvm->srcu read side. */ 9689 static void post_kvm_run_save(struct kvm_vcpu *vcpu) 9690 { 9691 struct kvm_run *kvm_run = vcpu->run; 9692 9693 kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu); 9694 kvm_run->cr8 = kvm_get_cr8(vcpu); 9695 kvm_run->apic_base = kvm_get_apic_base(vcpu); 9696 9697 kvm_run->ready_for_interrupt_injection = 9698 pic_in_kernel(vcpu->kvm) || 9699 kvm_vcpu_ready_for_interrupt_injection(vcpu); 9700 9701 if (is_smm(vcpu)) 9702 kvm_run->flags |= KVM_RUN_X86_SMM; 9703 } 9704 9705 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 9706 { 9707 int max_irr, tpr; 9708 9709 if (!kvm_x86_ops.update_cr8_intercept) 9710 return; 9711 9712 if (!lapic_in_kernel(vcpu)) 9713 return; 9714 9715 if (vcpu->arch.apic->apicv_active) 9716 return; 9717 9718 if (!vcpu->arch.apic->vapic_addr) 9719 max_irr = kvm_lapic_find_highest_irr(vcpu); 9720 else 9721 max_irr = -1; 9722 9723 if (max_irr != -1) 9724 max_irr >>= 4; 9725 9726 tpr = kvm_lapic_get_cr8(vcpu); 9727 9728 static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); 9729 } 9730 9731 9732 int kvm_check_nested_events(struct kvm_vcpu *vcpu) 9733 { 9734 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 9735 kvm_x86_ops.nested_ops->triple_fault(vcpu); 9736 return 1; 9737 } 9738 9739 return kvm_x86_ops.nested_ops->check_events(vcpu); 9740 } 9741 9742 static void kvm_inject_exception(struct kvm_vcpu *vcpu) 9743 { 9744 trace_kvm_inj_exception(vcpu->arch.exception.vector, 9745 vcpu->arch.exception.has_error_code, 9746 vcpu->arch.exception.error_code, 9747 vcpu->arch.exception.injected); 9748 9749 if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) 9750 vcpu->arch.exception.error_code = false; 9751 static_call(kvm_x86_inject_exception)(vcpu); 9752 } 9753 9754 /* 9755 * Check for any event (interrupt or exception) that is ready to be injected, 9756 * and if there is at least one event, inject the event with the highest 9757 * priority. This handles both "pending" events, i.e. events that have never 9758 * been injected into the guest, and "injected" events, i.e. events that were 9759 * injected as part of a previous VM-Enter, but weren't successfully delivered 9760 * and need to be re-injected. 9761 * 9762 * Note, this is not guaranteed to be invoked on a guest instruction boundary, 9763 * i.e. doesn't guarantee that there's an event window in the guest. KVM must 9764 * be able to inject exceptions in the "middle" of an instruction, and so must 9765 * also be able to re-inject NMIs and IRQs in the middle of an instruction. 9766 * I.e. for exceptions and re-injected events, NOT invoking this on instruction 9767 * boundaries is necessary and correct. 9768 * 9769 * For simplicity, KVM uses a single path to inject all events (except events 9770 * that are injected directly from L1 to L2) and doesn't explicitly track 9771 * instruction boundaries for asynchronous events. However, because VM-Exits 9772 * that can occur during instruction execution typically result in KVM skipping 9773 * the instruction or injecting an exception, e.g. instruction and exception 9774 * intercepts, and because pending exceptions have higher priority than pending 9775 * interrupts, KVM still honors instruction boundaries in most scenarios. 9776 * 9777 * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip 9778 * the instruction or inject an exception, then KVM can incorrecty inject a new 9779 * asynchrounous event if the event became pending after the CPU fetched the 9780 * instruction (in the guest). E.g. if a page fault (#PF, #NPF, EPT violation) 9781 * occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be 9782 * injected on the restarted instruction instead of being deferred until the 9783 * instruction completes. 9784 * 9785 * In practice, this virtualization hole is unlikely to be observed by the 9786 * guest, and even less likely to cause functional problems. To detect the 9787 * hole, the guest would have to trigger an event on a side effect of an early 9788 * phase of instruction execution, e.g. on the instruction fetch from memory. 9789 * And for it to be a functional problem, the guest would need to depend on the 9790 * ordering between that side effect, the instruction completing, _and_ the 9791 * delivery of the asynchronous event. 9792 */ 9793 static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu, 9794 bool *req_immediate_exit) 9795 { 9796 bool can_inject; 9797 int r; 9798 9799 /* 9800 * Process nested events first, as nested VM-Exit supercedes event 9801 * re-injection. If there's an event queued for re-injection, it will 9802 * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit. 9803 */ 9804 if (is_guest_mode(vcpu)) 9805 r = kvm_check_nested_events(vcpu); 9806 else 9807 r = 0; 9808 9809 /* 9810 * Re-inject exceptions and events *especially* if immediate entry+exit 9811 * to/from L2 is needed, as any event that has already been injected 9812 * into L2 needs to complete its lifecycle before injecting a new event. 9813 * 9814 * Don't re-inject an NMI or interrupt if there is a pending exception. 9815 * This collision arises if an exception occurred while vectoring the 9816 * injected event, KVM intercepted said exception, and KVM ultimately 9817 * determined the fault belongs to the guest and queues the exception 9818 * for injection back into the guest. 9819 * 9820 * "Injected" interrupts can also collide with pending exceptions if 9821 * userspace ignores the "ready for injection" flag and blindly queues 9822 * an interrupt. In that case, prioritizing the exception is correct, 9823 * as the exception "occurred" before the exit to userspace. Trap-like 9824 * exceptions, e.g. most #DBs, have higher priority than interrupts. 9825 * And while fault-like exceptions, e.g. #GP and #PF, are the lowest 9826 * priority, they're only generated (pended) during instruction 9827 * execution, and interrupts are recognized at instruction boundaries. 9828 * Thus a pending fault-like exception means the fault occurred on the 9829 * *previous* instruction and must be serviced prior to recognizing any 9830 * new events in order to fully complete the previous instruction. 9831 */ 9832 if (vcpu->arch.exception.injected) 9833 kvm_inject_exception(vcpu); 9834 else if (kvm_is_exception_pending(vcpu)) 9835 ; /* see above */ 9836 else if (vcpu->arch.nmi_injected) 9837 static_call(kvm_x86_inject_nmi)(vcpu); 9838 else if (vcpu->arch.interrupt.injected) 9839 static_call(kvm_x86_inject_irq)(vcpu, true); 9840 9841 /* 9842 * Exceptions that morph to VM-Exits are handled above, and pending 9843 * exceptions on top of injected exceptions that do not VM-Exit should 9844 * either morph to #DF or, sadly, override the injected exception. 9845 */ 9846 WARN_ON_ONCE(vcpu->arch.exception.injected && 9847 vcpu->arch.exception.pending); 9848 9849 /* 9850 * Bail if immediate entry+exit to/from the guest is needed to complete 9851 * nested VM-Enter or event re-injection so that a different pending 9852 * event can be serviced (or if KVM needs to exit to userspace). 9853 * 9854 * Otherwise, continue processing events even if VM-Exit occurred. The 9855 * VM-Exit will have cleared exceptions that were meant for L2, but 9856 * there may now be events that can be injected into L1. 9857 */ 9858 if (r < 0) 9859 goto out; 9860 9861 /* 9862 * A pending exception VM-Exit should either result in nested VM-Exit 9863 * or force an immediate re-entry and exit to/from L2, and exception 9864 * VM-Exits cannot be injected (flag should _never_ be set). 9865 */ 9866 WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected || 9867 vcpu->arch.exception_vmexit.pending); 9868 9869 /* 9870 * New events, other than exceptions, cannot be injected if KVM needs 9871 * to re-inject a previous event. See above comments on re-injecting 9872 * for why pending exceptions get priority. 9873 */ 9874 can_inject = !kvm_event_needs_reinjection(vcpu); 9875 9876 if (vcpu->arch.exception.pending) { 9877 /* 9878 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS 9879 * value pushed on the stack. Trap-like exception and all #DBs 9880 * leave RF as-is (KVM follows Intel's behavior in this regard; 9881 * AMD states that code breakpoint #DBs excplitly clear RF=0). 9882 * 9883 * Note, most versions of Intel's SDM and AMD's APM incorrectly 9884 * describe the behavior of General Detect #DBs, which are 9885 * fault-like. They do _not_ set RF, a la code breakpoints. 9886 */ 9887 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT) 9888 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | 9889 X86_EFLAGS_RF); 9890 9891 if (vcpu->arch.exception.vector == DB_VECTOR) { 9892 kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception); 9893 if (vcpu->arch.dr7 & DR7_GD) { 9894 vcpu->arch.dr7 &= ~DR7_GD; 9895 kvm_update_dr7(vcpu); 9896 } 9897 } 9898 9899 kvm_inject_exception(vcpu); 9900 9901 vcpu->arch.exception.pending = false; 9902 vcpu->arch.exception.injected = true; 9903 9904 can_inject = false; 9905 } 9906 9907 /* Don't inject interrupts if the user asked to avoid doing so */ 9908 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) 9909 return 0; 9910 9911 /* 9912 * Finally, inject interrupt events. If an event cannot be injected 9913 * due to architectural conditions (e.g. IF=0) a window-open exit 9914 * will re-request KVM_REQ_EVENT. Sometimes however an event is pending 9915 * and can architecturally be injected, but we cannot do it right now: 9916 * an interrupt could have arrived just now and we have to inject it 9917 * as a vmexit, or there could already an event in the queue, which is 9918 * indicated by can_inject. In that case we request an immediate exit 9919 * in order to make progress and get back here for another iteration. 9920 * The kvm_x86_ops hooks communicate this by returning -EBUSY. 9921 */ 9922 #ifdef CONFIG_KVM_SMM 9923 if (vcpu->arch.smi_pending) { 9924 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; 9925 if (r < 0) 9926 goto out; 9927 if (r) { 9928 vcpu->arch.smi_pending = false; 9929 ++vcpu->arch.smi_count; 9930 enter_smm(vcpu); 9931 can_inject = false; 9932 } else 9933 static_call(kvm_x86_enable_smi_window)(vcpu); 9934 } 9935 #endif 9936 9937 if (vcpu->arch.nmi_pending) { 9938 r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; 9939 if (r < 0) 9940 goto out; 9941 if (r) { 9942 --vcpu->arch.nmi_pending; 9943 vcpu->arch.nmi_injected = true; 9944 static_call(kvm_x86_inject_nmi)(vcpu); 9945 can_inject = false; 9946 WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); 9947 } 9948 if (vcpu->arch.nmi_pending) 9949 static_call(kvm_x86_enable_nmi_window)(vcpu); 9950 } 9951 9952 if (kvm_cpu_has_injectable_intr(vcpu)) { 9953 r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; 9954 if (r < 0) 9955 goto out; 9956 if (r) { 9957 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); 9958 static_call(kvm_x86_inject_irq)(vcpu, false); 9959 WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); 9960 } 9961 if (kvm_cpu_has_injectable_intr(vcpu)) 9962 static_call(kvm_x86_enable_irq_window)(vcpu); 9963 } 9964 9965 if (is_guest_mode(vcpu) && 9966 kvm_x86_ops.nested_ops->has_events && 9967 kvm_x86_ops.nested_ops->has_events(vcpu)) 9968 *req_immediate_exit = true; 9969 9970 /* 9971 * KVM must never queue a new exception while injecting an event; KVM 9972 * is done emulating and should only propagate the to-be-injected event 9973 * to the VMCS/VMCB. Queueing a new exception can put the vCPU into an 9974 * infinite loop as KVM will bail from VM-Enter to inject the pending 9975 * exception and start the cycle all over. 9976 * 9977 * Exempt triple faults as they have special handling and won't put the 9978 * vCPU into an infinite loop. Triple fault can be queued when running 9979 * VMX without unrestricted guest, as that requires KVM to emulate Real 9980 * Mode events (see kvm_inject_realmode_interrupt()). 9981 */ 9982 WARN_ON_ONCE(vcpu->arch.exception.pending || 9983 vcpu->arch.exception_vmexit.pending); 9984 return 0; 9985 9986 out: 9987 if (r == -EBUSY) { 9988 *req_immediate_exit = true; 9989 r = 0; 9990 } 9991 return r; 9992 } 9993 9994 static void process_nmi(struct kvm_vcpu *vcpu) 9995 { 9996 unsigned limit = 2; 9997 9998 /* 9999 * x86 is limited to one NMI running, and one NMI pending after it. 10000 * If an NMI is already in progress, limit further NMIs to just one. 10001 * Otherwise, allow two (and we'll inject the first one immediately). 10002 */ 10003 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) 10004 limit = 1; 10005 10006 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); 10007 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); 10008 kvm_make_request(KVM_REQ_EVENT, vcpu); 10009 } 10010 10011 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, 10012 unsigned long *vcpu_bitmap) 10013 { 10014 kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap); 10015 } 10016 10017 void kvm_make_scan_ioapic_request(struct kvm *kvm) 10018 { 10019 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); 10020 } 10021 10022 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) 10023 { 10024 struct kvm_lapic *apic = vcpu->arch.apic; 10025 bool activate; 10026 10027 if (!lapic_in_kernel(vcpu)) 10028 return; 10029 10030 down_read(&vcpu->kvm->arch.apicv_update_lock); 10031 preempt_disable(); 10032 10033 /* Do not activate APICV when APIC is disabled */ 10034 activate = kvm_vcpu_apicv_activated(vcpu) && 10035 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED); 10036 10037 if (apic->apicv_active == activate) 10038 goto out; 10039 10040 apic->apicv_active = activate; 10041 kvm_apic_update_apicv(vcpu); 10042 static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); 10043 10044 /* 10045 * When APICv gets disabled, we may still have injected interrupts 10046 * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was 10047 * still active when the interrupt got accepted. Make sure 10048 * kvm_check_and_inject_events() is called to check for that. 10049 */ 10050 if (!apic->apicv_active) 10051 kvm_make_request(KVM_REQ_EVENT, vcpu); 10052 10053 out: 10054 preempt_enable(); 10055 up_read(&vcpu->kvm->arch.apicv_update_lock); 10056 } 10057 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); 10058 10059 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, 10060 enum kvm_apicv_inhibit reason, bool set) 10061 { 10062 unsigned long old, new; 10063 10064 lockdep_assert_held_write(&kvm->arch.apicv_update_lock); 10065 10066 if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(reason)) 10067 return; 10068 10069 old = new = kvm->arch.apicv_inhibit_reasons; 10070 10071 set_or_clear_apicv_inhibit(&new, reason, set); 10072 10073 if (!!old != !!new) { 10074 /* 10075 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid 10076 * false positives in the sanity check WARN in svm_vcpu_run(). 10077 * This task will wait for all vCPUs to ack the kick IRQ before 10078 * updating apicv_inhibit_reasons, and all other vCPUs will 10079 * block on acquiring apicv_update_lock so that vCPUs can't 10080 * redo svm_vcpu_run() without seeing the new inhibit state. 10081 * 10082 * Note, holding apicv_update_lock and taking it in the read 10083 * side (handling the request) also prevents other vCPUs from 10084 * servicing the request with a stale apicv_inhibit_reasons. 10085 */ 10086 kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE); 10087 kvm->arch.apicv_inhibit_reasons = new; 10088 if (new) { 10089 unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE); 10090 int idx = srcu_read_lock(&kvm->srcu); 10091 10092 kvm_zap_gfn_range(kvm, gfn, gfn+1); 10093 srcu_read_unlock(&kvm->srcu, idx); 10094 } 10095 } else { 10096 kvm->arch.apicv_inhibit_reasons = new; 10097 } 10098 } 10099 10100 void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, 10101 enum kvm_apicv_inhibit reason, bool set) 10102 { 10103 if (!enable_apicv) 10104 return; 10105 10106 down_write(&kvm->arch.apicv_update_lock); 10107 __kvm_set_or_clear_apicv_inhibit(kvm, reason, set); 10108 up_write(&kvm->arch.apicv_update_lock); 10109 } 10110 EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit); 10111 10112 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) 10113 { 10114 if (!kvm_apic_present(vcpu)) 10115 return; 10116 10117 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); 10118 10119 if (irqchip_split(vcpu->kvm)) 10120 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); 10121 else { 10122 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10123 if (ioapic_in_kernel(vcpu->kvm)) 10124 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); 10125 } 10126 10127 if (is_guest_mode(vcpu)) 10128 vcpu->arch.load_eoi_exitmap_pending = true; 10129 else 10130 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); 10131 } 10132 10133 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) 10134 { 10135 u64 eoi_exit_bitmap[4]; 10136 10137 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 10138 return; 10139 10140 if (to_hv_vcpu(vcpu)) { 10141 bitmap_or((ulong *)eoi_exit_bitmap, 10142 vcpu->arch.ioapic_handled_vectors, 10143 to_hv_synic(vcpu)->vec_bitmap, 256); 10144 static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); 10145 return; 10146 } 10147 10148 static_call_cond(kvm_x86_load_eoi_exitmap)( 10149 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); 10150 } 10151 10152 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, 10153 unsigned long start, unsigned long end) 10154 { 10155 unsigned long apic_address; 10156 10157 /* 10158 * The physical address of apic access page is stored in the VMCS. 10159 * Update it when it becomes invalid. 10160 */ 10161 apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 10162 if (start <= apic_address && apic_address < end) 10163 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); 10164 } 10165 10166 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) 10167 { 10168 static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm); 10169 } 10170 10171 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) 10172 { 10173 if (!lapic_in_kernel(vcpu)) 10174 return; 10175 10176 static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu); 10177 } 10178 10179 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) 10180 { 10181 smp_send_reschedule(vcpu->cpu); 10182 } 10183 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); 10184 10185 /* 10186 * Called within kvm->srcu read side. 10187 * Returns 1 to let vcpu_run() continue the guest execution loop without 10188 * exiting to the userspace. Otherwise, the value will be returned to the 10189 * userspace. 10190 */ 10191 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) 10192 { 10193 int r; 10194 bool req_int_win = 10195 dm_request_for_irq_injection(vcpu) && 10196 kvm_cpu_accept_dm_intr(vcpu); 10197 fastpath_t exit_fastpath; 10198 10199 bool req_immediate_exit = false; 10200 10201 /* Forbid vmenter if vcpu dirty ring is soft-full */ 10202 if (unlikely(vcpu->kvm->dirty_ring_size && 10203 kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) { 10204 vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL; 10205 trace_kvm_dirty_ring_exit(vcpu); 10206 r = 0; 10207 goto out; 10208 } 10209 10210 if (kvm_request_pending(vcpu)) { 10211 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) { 10212 r = -EIO; 10213 goto out; 10214 } 10215 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { 10216 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { 10217 r = 0; 10218 goto out; 10219 } 10220 } 10221 if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu)) 10222 kvm_mmu_free_obsolete_roots(vcpu); 10223 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) 10224 __kvm_migrate_timers(vcpu); 10225 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) 10226 kvm_update_masterclock(vcpu->kvm); 10227 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) 10228 kvm_gen_kvmclock_update(vcpu); 10229 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { 10230 r = kvm_guest_time_update(vcpu); 10231 if (unlikely(r)) 10232 goto out; 10233 } 10234 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 10235 kvm_mmu_sync_roots(vcpu); 10236 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) 10237 kvm_mmu_load_pgd(vcpu); 10238 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { 10239 kvm_vcpu_flush_tlb_all(vcpu); 10240 10241 /* Flushing all ASIDs flushes the current ASID... */ 10242 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 10243 } 10244 kvm_service_local_tlb_flush_requests(vcpu); 10245 10246 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { 10247 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; 10248 r = 0; 10249 goto out; 10250 } 10251 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 10252 if (is_guest_mode(vcpu)) { 10253 kvm_x86_ops.nested_ops->triple_fault(vcpu); 10254 } else { 10255 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 10256 vcpu->mmio_needed = 0; 10257 r = 0; 10258 goto out; 10259 } 10260 } 10261 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { 10262 /* Page is swapped out. Do synthetic halt */ 10263 vcpu->arch.apf.halted = true; 10264 r = 1; 10265 goto out; 10266 } 10267 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) 10268 record_steal_time(vcpu); 10269 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 10270 process_smi(vcpu); 10271 if (kvm_check_request(KVM_REQ_NMI, vcpu)) 10272 process_nmi(vcpu); 10273 if (kvm_check_request(KVM_REQ_PMU, vcpu)) 10274 kvm_pmu_handle_event(vcpu); 10275 if (kvm_check_request(KVM_REQ_PMI, vcpu)) 10276 kvm_pmu_deliver_pmi(vcpu); 10277 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { 10278 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); 10279 if (test_bit(vcpu->arch.pending_ioapic_eoi, 10280 vcpu->arch.ioapic_handled_vectors)) { 10281 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; 10282 vcpu->run->eoi.vector = 10283 vcpu->arch.pending_ioapic_eoi; 10284 r = 0; 10285 goto out; 10286 } 10287 } 10288 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 10289 vcpu_scan_ioapic(vcpu); 10290 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) 10291 vcpu_load_eoi_exitmap(vcpu); 10292 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) 10293 kvm_vcpu_reload_apic_access_page(vcpu); 10294 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { 10295 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10296 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; 10297 vcpu->run->system_event.ndata = 0; 10298 r = 0; 10299 goto out; 10300 } 10301 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { 10302 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10303 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; 10304 vcpu->run->system_event.ndata = 0; 10305 r = 0; 10306 goto out; 10307 } 10308 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { 10309 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 10310 10311 vcpu->run->exit_reason = KVM_EXIT_HYPERV; 10312 vcpu->run->hyperv = hv_vcpu->exit; 10313 r = 0; 10314 goto out; 10315 } 10316 10317 /* 10318 * KVM_REQ_HV_STIMER has to be processed after 10319 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers 10320 * depend on the guest clock being up-to-date 10321 */ 10322 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) 10323 kvm_hv_process_stimers(vcpu); 10324 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) 10325 kvm_vcpu_update_apicv(vcpu); 10326 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) 10327 kvm_check_async_pf_completion(vcpu); 10328 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) 10329 static_call(kvm_x86_msr_filter_changed)(vcpu); 10330 10331 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) 10332 static_call(kvm_x86_update_cpu_dirty_logging)(vcpu); 10333 } 10334 10335 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || 10336 kvm_xen_has_interrupt(vcpu)) { 10337 ++vcpu->stat.req_event; 10338 r = kvm_apic_accept_events(vcpu); 10339 if (r < 0) { 10340 r = 0; 10341 goto out; 10342 } 10343 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 10344 r = 1; 10345 goto out; 10346 } 10347 10348 r = kvm_check_and_inject_events(vcpu, &req_immediate_exit); 10349 if (r < 0) { 10350 r = 0; 10351 goto out; 10352 } 10353 if (req_int_win) 10354 static_call(kvm_x86_enable_irq_window)(vcpu); 10355 10356 if (kvm_lapic_enabled(vcpu)) { 10357 update_cr8_intercept(vcpu); 10358 kvm_lapic_sync_to_vapic(vcpu); 10359 } 10360 } 10361 10362 r = kvm_mmu_reload(vcpu); 10363 if (unlikely(r)) { 10364 goto cancel_injection; 10365 } 10366 10367 preempt_disable(); 10368 10369 static_call(kvm_x86_prepare_switch_to_guest)(vcpu); 10370 10371 /* 10372 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt 10373 * IPI are then delayed after guest entry, which ensures that they 10374 * result in virtual interrupt delivery. 10375 */ 10376 local_irq_disable(); 10377 10378 /* Store vcpu->apicv_active before vcpu->mode. */ 10379 smp_store_release(&vcpu->mode, IN_GUEST_MODE); 10380 10381 kvm_vcpu_srcu_read_unlock(vcpu); 10382 10383 /* 10384 * 1) We should set ->mode before checking ->requests. Please see 10385 * the comment in kvm_vcpu_exiting_guest_mode(). 10386 * 10387 * 2) For APICv, we should set ->mode before checking PID.ON. This 10388 * pairs with the memory barrier implicit in pi_test_and_set_on 10389 * (see vmx_deliver_posted_interrupt). 10390 * 10391 * 3) This also orders the write to mode from any reads to the page 10392 * tables done while the VCPU is running. Please see the comment 10393 * in kvm_flush_remote_tlbs. 10394 */ 10395 smp_mb__after_srcu_read_unlock(); 10396 10397 /* 10398 * Process pending posted interrupts to handle the case where the 10399 * notification IRQ arrived in the host, or was never sent (because the 10400 * target vCPU wasn't running). Do this regardless of the vCPU's APICv 10401 * status, KVM doesn't update assigned devices when APICv is inhibited, 10402 * i.e. they can post interrupts even if APICv is temporarily disabled. 10403 */ 10404 if (kvm_lapic_enabled(vcpu)) 10405 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10406 10407 if (kvm_vcpu_exit_request(vcpu)) { 10408 vcpu->mode = OUTSIDE_GUEST_MODE; 10409 smp_wmb(); 10410 local_irq_enable(); 10411 preempt_enable(); 10412 kvm_vcpu_srcu_read_lock(vcpu); 10413 r = 1; 10414 goto cancel_injection; 10415 } 10416 10417 if (req_immediate_exit) { 10418 kvm_make_request(KVM_REQ_EVENT, vcpu); 10419 static_call(kvm_x86_request_immediate_exit)(vcpu); 10420 } 10421 10422 fpregs_assert_state_consistent(); 10423 if (test_thread_flag(TIF_NEED_FPU_LOAD)) 10424 switch_fpu_return(); 10425 10426 if (vcpu->arch.guest_fpu.xfd_err) 10427 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); 10428 10429 if (unlikely(vcpu->arch.switch_db_regs)) { 10430 set_debugreg(0, 7); 10431 set_debugreg(vcpu->arch.eff_db[0], 0); 10432 set_debugreg(vcpu->arch.eff_db[1], 1); 10433 set_debugreg(vcpu->arch.eff_db[2], 2); 10434 set_debugreg(vcpu->arch.eff_db[3], 3); 10435 } else if (unlikely(hw_breakpoint_active())) { 10436 set_debugreg(0, 7); 10437 } 10438 10439 guest_timing_enter_irqoff(); 10440 10441 for (;;) { 10442 /* 10443 * Assert that vCPU vs. VM APICv state is consistent. An APICv 10444 * update must kick and wait for all vCPUs before toggling the 10445 * per-VM state, and responsing vCPUs must wait for the update 10446 * to complete before servicing KVM_REQ_APICV_UPDATE. 10447 */ 10448 WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) && 10449 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED)); 10450 10451 exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu); 10452 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) 10453 break; 10454 10455 if (kvm_lapic_enabled(vcpu)) 10456 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10457 10458 if (unlikely(kvm_vcpu_exit_request(vcpu))) { 10459 exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; 10460 break; 10461 } 10462 } 10463 10464 /* 10465 * Do this here before restoring debug registers on the host. And 10466 * since we do this before handling the vmexit, a DR access vmexit 10467 * can (a) read the correct value of the debug registers, (b) set 10468 * KVM_DEBUGREG_WONT_EXIT again. 10469 */ 10470 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { 10471 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); 10472 static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); 10473 kvm_update_dr0123(vcpu); 10474 kvm_update_dr7(vcpu); 10475 } 10476 10477 /* 10478 * If the guest has used debug registers, at least dr7 10479 * will be disabled while returning to the host. 10480 * If we don't have active breakpoints in the host, we don't 10481 * care about the messed up debug address registers. But if 10482 * we have some of them active, restore the old state. 10483 */ 10484 if (hw_breakpoint_active()) 10485 hw_breakpoint_restore(); 10486 10487 vcpu->arch.last_vmentry_cpu = vcpu->cpu; 10488 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 10489 10490 vcpu->mode = OUTSIDE_GUEST_MODE; 10491 smp_wmb(); 10492 10493 /* 10494 * Sync xfd before calling handle_exit_irqoff() which may 10495 * rely on the fact that guest_fpu::xfd is up-to-date (e.g. 10496 * in #NM irqoff handler). 10497 */ 10498 if (vcpu->arch.xfd_no_write_intercept) 10499 fpu_sync_guest_vmexit_xfd_state(); 10500 10501 static_call(kvm_x86_handle_exit_irqoff)(vcpu); 10502 10503 if (vcpu->arch.guest_fpu.xfd_err) 10504 wrmsrl(MSR_IA32_XFD_ERR, 0); 10505 10506 /* 10507 * Consume any pending interrupts, including the possible source of 10508 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. 10509 * An instruction is required after local_irq_enable() to fully unblock 10510 * interrupts on processors that implement an interrupt shadow, the 10511 * stat.exits increment will do nicely. 10512 */ 10513 kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ); 10514 local_irq_enable(); 10515 ++vcpu->stat.exits; 10516 local_irq_disable(); 10517 kvm_after_interrupt(vcpu); 10518 10519 /* 10520 * Wait until after servicing IRQs to account guest time so that any 10521 * ticks that occurred while running the guest are properly accounted 10522 * to the guest. Waiting until IRQs are enabled degrades the accuracy 10523 * of accounting via context tracking, but the loss of accuracy is 10524 * acceptable for all known use cases. 10525 */ 10526 guest_timing_exit_irqoff(); 10527 10528 local_irq_enable(); 10529 preempt_enable(); 10530 10531 kvm_vcpu_srcu_read_lock(vcpu); 10532 10533 /* 10534 * Profile KVM exit RIPs: 10535 */ 10536 if (unlikely(prof_on == KVM_PROFILING)) { 10537 unsigned long rip = kvm_rip_read(vcpu); 10538 profile_hit(KVM_PROFILING, (void *)rip); 10539 } 10540 10541 if (unlikely(vcpu->arch.tsc_always_catchup)) 10542 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 10543 10544 if (vcpu->arch.apic_attention) 10545 kvm_lapic_sync_from_vapic(vcpu); 10546 10547 r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); 10548 return r; 10549 10550 cancel_injection: 10551 if (req_immediate_exit) 10552 kvm_make_request(KVM_REQ_EVENT, vcpu); 10553 static_call(kvm_x86_cancel_injection)(vcpu); 10554 if (unlikely(vcpu->arch.apic_attention)) 10555 kvm_lapic_sync_from_vapic(vcpu); 10556 out: 10557 return r; 10558 } 10559 10560 /* Called within kvm->srcu read side. */ 10561 static inline int vcpu_block(struct kvm_vcpu *vcpu) 10562 { 10563 bool hv_timer; 10564 10565 if (!kvm_arch_vcpu_runnable(vcpu)) { 10566 /* 10567 * Switch to the software timer before halt-polling/blocking as 10568 * the guest's timer may be a break event for the vCPU, and the 10569 * hypervisor timer runs only when the CPU is in guest mode. 10570 * Switch before halt-polling so that KVM recognizes an expired 10571 * timer before blocking. 10572 */ 10573 hv_timer = kvm_lapic_hv_timer_in_use(vcpu); 10574 if (hv_timer) 10575 kvm_lapic_switch_to_sw_timer(vcpu); 10576 10577 kvm_vcpu_srcu_read_unlock(vcpu); 10578 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 10579 kvm_vcpu_halt(vcpu); 10580 else 10581 kvm_vcpu_block(vcpu); 10582 kvm_vcpu_srcu_read_lock(vcpu); 10583 10584 if (hv_timer) 10585 kvm_lapic_switch_to_hv_timer(vcpu); 10586 10587 /* 10588 * If the vCPU is not runnable, a signal or another host event 10589 * of some kind is pending; service it without changing the 10590 * vCPU's activity state. 10591 */ 10592 if (!kvm_arch_vcpu_runnable(vcpu)) 10593 return 1; 10594 } 10595 10596 /* 10597 * Evaluate nested events before exiting the halted state. This allows 10598 * the halt state to be recorded properly in the VMCS12's activity 10599 * state field (AMD does not have a similar field and a VM-Exit always 10600 * causes a spurious wakeup from HLT). 10601 */ 10602 if (is_guest_mode(vcpu)) { 10603 if (kvm_check_nested_events(vcpu) < 0) 10604 return 0; 10605 } 10606 10607 if (kvm_apic_accept_events(vcpu) < 0) 10608 return 0; 10609 switch(vcpu->arch.mp_state) { 10610 case KVM_MP_STATE_HALTED: 10611 case KVM_MP_STATE_AP_RESET_HOLD: 10612 vcpu->arch.pv.pv_unhalted = false; 10613 vcpu->arch.mp_state = 10614 KVM_MP_STATE_RUNNABLE; 10615 fallthrough; 10616 case KVM_MP_STATE_RUNNABLE: 10617 vcpu->arch.apf.halted = false; 10618 break; 10619 case KVM_MP_STATE_INIT_RECEIVED: 10620 break; 10621 default: 10622 WARN_ON_ONCE(1); 10623 break; 10624 } 10625 return 1; 10626 } 10627 10628 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) 10629 { 10630 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 10631 !vcpu->arch.apf.halted); 10632 } 10633 10634 /* Called within kvm->srcu read side. */ 10635 static int vcpu_run(struct kvm_vcpu *vcpu) 10636 { 10637 int r; 10638 10639 vcpu->arch.l1tf_flush_l1d = true; 10640 10641 for (;;) { 10642 /* 10643 * If another guest vCPU requests a PV TLB flush in the middle 10644 * of instruction emulation, the rest of the emulation could 10645 * use a stale page translation. Assume that any code after 10646 * this point can start executing an instruction. 10647 */ 10648 vcpu->arch.at_instruction_boundary = false; 10649 if (kvm_vcpu_running(vcpu)) { 10650 r = vcpu_enter_guest(vcpu); 10651 } else { 10652 r = vcpu_block(vcpu); 10653 } 10654 10655 if (r <= 0) 10656 break; 10657 10658 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu); 10659 if (kvm_xen_has_pending_events(vcpu)) 10660 kvm_xen_inject_pending_events(vcpu); 10661 10662 if (kvm_cpu_has_pending_timer(vcpu)) 10663 kvm_inject_pending_timer_irqs(vcpu); 10664 10665 if (dm_request_for_irq_injection(vcpu) && 10666 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { 10667 r = 0; 10668 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 10669 ++vcpu->stat.request_irq_exits; 10670 break; 10671 } 10672 10673 if (__xfer_to_guest_mode_work_pending()) { 10674 kvm_vcpu_srcu_read_unlock(vcpu); 10675 r = xfer_to_guest_mode_handle_work(vcpu); 10676 kvm_vcpu_srcu_read_lock(vcpu); 10677 if (r) 10678 return r; 10679 } 10680 } 10681 10682 return r; 10683 } 10684 10685 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) 10686 { 10687 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 10688 } 10689 10690 static int complete_emulated_pio(struct kvm_vcpu *vcpu) 10691 { 10692 BUG_ON(!vcpu->arch.pio.count); 10693 10694 return complete_emulated_io(vcpu); 10695 } 10696 10697 /* 10698 * Implements the following, as a state machine: 10699 * 10700 * read: 10701 * for each fragment 10702 * for each mmio piece in the fragment 10703 * write gpa, len 10704 * exit 10705 * copy data 10706 * execute insn 10707 * 10708 * write: 10709 * for each fragment 10710 * for each mmio piece in the fragment 10711 * write gpa, len 10712 * copy data 10713 * exit 10714 */ 10715 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) 10716 { 10717 struct kvm_run *run = vcpu->run; 10718 struct kvm_mmio_fragment *frag; 10719 unsigned len; 10720 10721 BUG_ON(!vcpu->mmio_needed); 10722 10723 /* Complete previous fragment */ 10724 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 10725 len = min(8u, frag->len); 10726 if (!vcpu->mmio_is_write) 10727 memcpy(frag->data, run->mmio.data, len); 10728 10729 if (frag->len <= 8) { 10730 /* Switch to the next fragment. */ 10731 frag++; 10732 vcpu->mmio_cur_fragment++; 10733 } else { 10734 /* Go forward to the next mmio piece. */ 10735 frag->data += len; 10736 frag->gpa += len; 10737 frag->len -= len; 10738 } 10739 10740 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 10741 vcpu->mmio_needed = 0; 10742 10743 /* FIXME: return into emulator if single-stepping. */ 10744 if (vcpu->mmio_is_write) 10745 return 1; 10746 vcpu->mmio_read_completed = 1; 10747 return complete_emulated_io(vcpu); 10748 } 10749 10750 run->exit_reason = KVM_EXIT_MMIO; 10751 run->mmio.phys_addr = frag->gpa; 10752 if (vcpu->mmio_is_write) 10753 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 10754 run->mmio.len = min(8u, frag->len); 10755 run->mmio.is_write = vcpu->mmio_is_write; 10756 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 10757 return 0; 10758 } 10759 10760 /* Swap (qemu) user FPU context for the guest FPU context. */ 10761 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 10762 { 10763 /* Exclude PKRU, it's restored separately immediately after VM-Exit. */ 10764 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true); 10765 trace_kvm_fpu(1); 10766 } 10767 10768 /* When vcpu_run ends, restore user space FPU context. */ 10769 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 10770 { 10771 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false); 10772 ++vcpu->stat.fpu_reload; 10773 trace_kvm_fpu(0); 10774 } 10775 10776 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) 10777 { 10778 struct kvm_queued_exception *ex = &vcpu->arch.exception; 10779 struct kvm_run *kvm_run = vcpu->run; 10780 int r; 10781 10782 vcpu_load(vcpu); 10783 kvm_sigset_activate(vcpu); 10784 kvm_run->flags = 0; 10785 kvm_load_guest_fpu(vcpu); 10786 10787 kvm_vcpu_srcu_read_lock(vcpu); 10788 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { 10789 if (kvm_run->immediate_exit) { 10790 r = -EINTR; 10791 goto out; 10792 } 10793 /* 10794 * It should be impossible for the hypervisor timer to be in 10795 * use before KVM has ever run the vCPU. 10796 */ 10797 WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu)); 10798 10799 kvm_vcpu_srcu_read_unlock(vcpu); 10800 kvm_vcpu_block(vcpu); 10801 kvm_vcpu_srcu_read_lock(vcpu); 10802 10803 if (kvm_apic_accept_events(vcpu) < 0) { 10804 r = 0; 10805 goto out; 10806 } 10807 r = -EAGAIN; 10808 if (signal_pending(current)) { 10809 r = -EINTR; 10810 kvm_run->exit_reason = KVM_EXIT_INTR; 10811 ++vcpu->stat.signal_exits; 10812 } 10813 goto out; 10814 } 10815 10816 if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) || 10817 (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) { 10818 r = -EINVAL; 10819 goto out; 10820 } 10821 10822 if (kvm_run->kvm_dirty_regs) { 10823 r = sync_regs(vcpu); 10824 if (r != 0) 10825 goto out; 10826 } 10827 10828 /* re-sync apic's tpr */ 10829 if (!lapic_in_kernel(vcpu)) { 10830 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { 10831 r = -EINVAL; 10832 goto out; 10833 } 10834 } 10835 10836 /* 10837 * If userspace set a pending exception and L2 is active, convert it to 10838 * a pending VM-Exit if L1 wants to intercept the exception. 10839 */ 10840 if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) && 10841 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector, 10842 ex->error_code)) { 10843 kvm_queue_exception_vmexit(vcpu, ex->vector, 10844 ex->has_error_code, ex->error_code, 10845 ex->has_payload, ex->payload); 10846 ex->injected = false; 10847 ex->pending = false; 10848 } 10849 vcpu->arch.exception_from_userspace = false; 10850 10851 if (unlikely(vcpu->arch.complete_userspace_io)) { 10852 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; 10853 vcpu->arch.complete_userspace_io = NULL; 10854 r = cui(vcpu); 10855 if (r <= 0) 10856 goto out; 10857 } else { 10858 WARN_ON_ONCE(vcpu->arch.pio.count); 10859 WARN_ON_ONCE(vcpu->mmio_needed); 10860 } 10861 10862 if (kvm_run->immediate_exit) { 10863 r = -EINTR; 10864 goto out; 10865 } 10866 10867 r = static_call(kvm_x86_vcpu_pre_run)(vcpu); 10868 if (r <= 0) 10869 goto out; 10870 10871 r = vcpu_run(vcpu); 10872 10873 out: 10874 kvm_put_guest_fpu(vcpu); 10875 if (kvm_run->kvm_valid_regs) 10876 store_regs(vcpu); 10877 post_kvm_run_save(vcpu); 10878 kvm_vcpu_srcu_read_unlock(vcpu); 10879 10880 kvm_sigset_deactivate(vcpu); 10881 vcpu_put(vcpu); 10882 return r; 10883 } 10884 10885 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10886 { 10887 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { 10888 /* 10889 * We are here if userspace calls get_regs() in the middle of 10890 * instruction emulation. Registers state needs to be copied 10891 * back from emulation context to vcpu. Userspace shouldn't do 10892 * that usually, but some bad designed PV devices (vmware 10893 * backdoor interface) need this to work 10894 */ 10895 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); 10896 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 10897 } 10898 regs->rax = kvm_rax_read(vcpu); 10899 regs->rbx = kvm_rbx_read(vcpu); 10900 regs->rcx = kvm_rcx_read(vcpu); 10901 regs->rdx = kvm_rdx_read(vcpu); 10902 regs->rsi = kvm_rsi_read(vcpu); 10903 regs->rdi = kvm_rdi_read(vcpu); 10904 regs->rsp = kvm_rsp_read(vcpu); 10905 regs->rbp = kvm_rbp_read(vcpu); 10906 #ifdef CONFIG_X86_64 10907 regs->r8 = kvm_r8_read(vcpu); 10908 regs->r9 = kvm_r9_read(vcpu); 10909 regs->r10 = kvm_r10_read(vcpu); 10910 regs->r11 = kvm_r11_read(vcpu); 10911 regs->r12 = kvm_r12_read(vcpu); 10912 regs->r13 = kvm_r13_read(vcpu); 10913 regs->r14 = kvm_r14_read(vcpu); 10914 regs->r15 = kvm_r15_read(vcpu); 10915 #endif 10916 10917 regs->rip = kvm_rip_read(vcpu); 10918 regs->rflags = kvm_get_rflags(vcpu); 10919 } 10920 10921 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10922 { 10923 vcpu_load(vcpu); 10924 __get_regs(vcpu, regs); 10925 vcpu_put(vcpu); 10926 return 0; 10927 } 10928 10929 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10930 { 10931 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; 10932 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 10933 10934 kvm_rax_write(vcpu, regs->rax); 10935 kvm_rbx_write(vcpu, regs->rbx); 10936 kvm_rcx_write(vcpu, regs->rcx); 10937 kvm_rdx_write(vcpu, regs->rdx); 10938 kvm_rsi_write(vcpu, regs->rsi); 10939 kvm_rdi_write(vcpu, regs->rdi); 10940 kvm_rsp_write(vcpu, regs->rsp); 10941 kvm_rbp_write(vcpu, regs->rbp); 10942 #ifdef CONFIG_X86_64 10943 kvm_r8_write(vcpu, regs->r8); 10944 kvm_r9_write(vcpu, regs->r9); 10945 kvm_r10_write(vcpu, regs->r10); 10946 kvm_r11_write(vcpu, regs->r11); 10947 kvm_r12_write(vcpu, regs->r12); 10948 kvm_r13_write(vcpu, regs->r13); 10949 kvm_r14_write(vcpu, regs->r14); 10950 kvm_r15_write(vcpu, regs->r15); 10951 #endif 10952 10953 kvm_rip_write(vcpu, regs->rip); 10954 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); 10955 10956 vcpu->arch.exception.pending = false; 10957 vcpu->arch.exception_vmexit.pending = false; 10958 10959 kvm_make_request(KVM_REQ_EVENT, vcpu); 10960 } 10961 10962 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10963 { 10964 vcpu_load(vcpu); 10965 __set_regs(vcpu, regs); 10966 vcpu_put(vcpu); 10967 return 0; 10968 } 10969 10970 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 10971 { 10972 struct desc_ptr dt; 10973 10974 if (vcpu->arch.guest_state_protected) 10975 goto skip_protected_regs; 10976 10977 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 10978 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 10979 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); 10980 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 10981 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 10982 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 10983 10984 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 10985 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 10986 10987 static_call(kvm_x86_get_idt)(vcpu, &dt); 10988 sregs->idt.limit = dt.size; 10989 sregs->idt.base = dt.address; 10990 static_call(kvm_x86_get_gdt)(vcpu, &dt); 10991 sregs->gdt.limit = dt.size; 10992 sregs->gdt.base = dt.address; 10993 10994 sregs->cr2 = vcpu->arch.cr2; 10995 sregs->cr3 = kvm_read_cr3(vcpu); 10996 10997 skip_protected_regs: 10998 sregs->cr0 = kvm_read_cr0(vcpu); 10999 sregs->cr4 = kvm_read_cr4(vcpu); 11000 sregs->cr8 = kvm_get_cr8(vcpu); 11001 sregs->efer = vcpu->arch.efer; 11002 sregs->apic_base = kvm_get_apic_base(vcpu); 11003 } 11004 11005 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11006 { 11007 __get_sregs_common(vcpu, sregs); 11008 11009 if (vcpu->arch.guest_state_protected) 11010 return; 11011 11012 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) 11013 set_bit(vcpu->arch.interrupt.nr, 11014 (unsigned long *)sregs->interrupt_bitmap); 11015 } 11016 11017 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 11018 { 11019 int i; 11020 11021 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2); 11022 11023 if (vcpu->arch.guest_state_protected) 11024 return; 11025 11026 if (is_pae_paging(vcpu)) { 11027 for (i = 0 ; i < 4 ; i++) 11028 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); 11029 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; 11030 } 11031 } 11032 11033 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 11034 struct kvm_sregs *sregs) 11035 { 11036 vcpu_load(vcpu); 11037 __get_sregs(vcpu, sregs); 11038 vcpu_put(vcpu); 11039 return 0; 11040 } 11041 11042 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 11043 struct kvm_mp_state *mp_state) 11044 { 11045 int r; 11046 11047 vcpu_load(vcpu); 11048 if (kvm_mpx_supported()) 11049 kvm_load_guest_fpu(vcpu); 11050 11051 r = kvm_apic_accept_events(vcpu); 11052 if (r < 0) 11053 goto out; 11054 r = 0; 11055 11056 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || 11057 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && 11058 vcpu->arch.pv.pv_unhalted) 11059 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; 11060 else 11061 mp_state->mp_state = vcpu->arch.mp_state; 11062 11063 out: 11064 if (kvm_mpx_supported()) 11065 kvm_put_guest_fpu(vcpu); 11066 vcpu_put(vcpu); 11067 return r; 11068 } 11069 11070 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 11071 struct kvm_mp_state *mp_state) 11072 { 11073 int ret = -EINVAL; 11074 11075 vcpu_load(vcpu); 11076 11077 switch (mp_state->mp_state) { 11078 case KVM_MP_STATE_UNINITIALIZED: 11079 case KVM_MP_STATE_HALTED: 11080 case KVM_MP_STATE_AP_RESET_HOLD: 11081 case KVM_MP_STATE_INIT_RECEIVED: 11082 case KVM_MP_STATE_SIPI_RECEIVED: 11083 if (!lapic_in_kernel(vcpu)) 11084 goto out; 11085 break; 11086 11087 case KVM_MP_STATE_RUNNABLE: 11088 break; 11089 11090 default: 11091 goto out; 11092 } 11093 11094 /* 11095 * Pending INITs are reported using KVM_SET_VCPU_EVENTS, disallow 11096 * forcing the guest into INIT/SIPI if those events are supposed to be 11097 * blocked. KVM prioritizes SMI over INIT, so reject INIT/SIPI state 11098 * if an SMI is pending as well. 11099 */ 11100 if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) && 11101 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || 11102 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) 11103 goto out; 11104 11105 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 11106 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 11107 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); 11108 } else 11109 vcpu->arch.mp_state = mp_state->mp_state; 11110 kvm_make_request(KVM_REQ_EVENT, vcpu); 11111 11112 ret = 0; 11113 out: 11114 vcpu_put(vcpu); 11115 return ret; 11116 } 11117 11118 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, 11119 int reason, bool has_error_code, u32 error_code) 11120 { 11121 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 11122 int ret; 11123 11124 init_emulate_ctxt(vcpu); 11125 11126 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, 11127 has_error_code, error_code); 11128 if (ret) { 11129 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 11130 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 11131 vcpu->run->internal.ndata = 0; 11132 return 0; 11133 } 11134 11135 kvm_rip_write(vcpu, ctxt->eip); 11136 kvm_set_rflags(vcpu, ctxt->eflags); 11137 return 1; 11138 } 11139 EXPORT_SYMBOL_GPL(kvm_task_switch); 11140 11141 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11142 { 11143 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { 11144 /* 11145 * When EFER.LME and CR0.PG are set, the processor is in 11146 * 64-bit mode (though maybe in a 32-bit code segment). 11147 * CR4.PAE and EFER.LMA must be set. 11148 */ 11149 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) 11150 return false; 11151 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) 11152 return false; 11153 } else { 11154 /* 11155 * Not in 64-bit mode: EFER.LMA is clear and the code 11156 * segment cannot be 64-bit. 11157 */ 11158 if (sregs->efer & EFER_LMA || sregs->cs.l) 11159 return false; 11160 } 11161 11162 return kvm_is_valid_cr4(vcpu, sregs->cr4); 11163 } 11164 11165 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, 11166 int *mmu_reset_needed, bool update_pdptrs) 11167 { 11168 struct msr_data apic_base_msr; 11169 int idx; 11170 struct desc_ptr dt; 11171 11172 if (!kvm_is_valid_sregs(vcpu, sregs)) 11173 return -EINVAL; 11174 11175 apic_base_msr.data = sregs->apic_base; 11176 apic_base_msr.host_initiated = true; 11177 if (kvm_set_apic_base(vcpu, &apic_base_msr)) 11178 return -EINVAL; 11179 11180 if (vcpu->arch.guest_state_protected) 11181 return 0; 11182 11183 dt.size = sregs->idt.limit; 11184 dt.address = sregs->idt.base; 11185 static_call(kvm_x86_set_idt)(vcpu, &dt); 11186 dt.size = sregs->gdt.limit; 11187 dt.address = sregs->gdt.base; 11188 static_call(kvm_x86_set_gdt)(vcpu, &dt); 11189 11190 vcpu->arch.cr2 = sregs->cr2; 11191 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; 11192 vcpu->arch.cr3 = sregs->cr3; 11193 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 11194 static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); 11195 11196 kvm_set_cr8(vcpu, sregs->cr8); 11197 11198 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 11199 static_call(kvm_x86_set_efer)(vcpu, sregs->efer); 11200 11201 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 11202 static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); 11203 vcpu->arch.cr0 = sregs->cr0; 11204 11205 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 11206 static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); 11207 11208 if (update_pdptrs) { 11209 idx = srcu_read_lock(&vcpu->kvm->srcu); 11210 if (is_pae_paging(vcpu)) { 11211 load_pdptrs(vcpu, kvm_read_cr3(vcpu)); 11212 *mmu_reset_needed = 1; 11213 } 11214 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11215 } 11216 11217 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 11218 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 11219 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); 11220 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 11221 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 11222 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 11223 11224 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 11225 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 11226 11227 update_cr8_intercept(vcpu); 11228 11229 /* Older userspace won't unhalt the vcpu on reset. */ 11230 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && 11231 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 11232 !is_protmode(vcpu)) 11233 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 11234 11235 return 0; 11236 } 11237 11238 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11239 { 11240 int pending_vec, max_bits; 11241 int mmu_reset_needed = 0; 11242 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true); 11243 11244 if (ret) 11245 return ret; 11246 11247 if (mmu_reset_needed) 11248 kvm_mmu_reset_context(vcpu); 11249 11250 max_bits = KVM_NR_INTERRUPTS; 11251 pending_vec = find_first_bit( 11252 (const unsigned long *)sregs->interrupt_bitmap, max_bits); 11253 11254 if (pending_vec < max_bits) { 11255 kvm_queue_interrupt(vcpu, pending_vec, false); 11256 pr_debug("Set back pending irq %d\n", pending_vec); 11257 kvm_make_request(KVM_REQ_EVENT, vcpu); 11258 } 11259 return 0; 11260 } 11261 11262 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 11263 { 11264 int mmu_reset_needed = 0; 11265 bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; 11266 bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) && 11267 !(sregs2->efer & EFER_LMA); 11268 int i, ret; 11269 11270 if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID) 11271 return -EINVAL; 11272 11273 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) 11274 return -EINVAL; 11275 11276 ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2, 11277 &mmu_reset_needed, !valid_pdptrs); 11278 if (ret) 11279 return ret; 11280 11281 if (valid_pdptrs) { 11282 for (i = 0; i < 4 ; i++) 11283 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]); 11284 11285 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); 11286 mmu_reset_needed = 1; 11287 vcpu->arch.pdptrs_from_userspace = true; 11288 } 11289 if (mmu_reset_needed) 11290 kvm_mmu_reset_context(vcpu); 11291 return 0; 11292 } 11293 11294 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 11295 struct kvm_sregs *sregs) 11296 { 11297 int ret; 11298 11299 vcpu_load(vcpu); 11300 ret = __set_sregs(vcpu, sregs); 11301 vcpu_put(vcpu); 11302 return ret; 11303 } 11304 11305 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm) 11306 { 11307 bool set = false; 11308 struct kvm_vcpu *vcpu; 11309 unsigned long i; 11310 11311 if (!enable_apicv) 11312 return; 11313 11314 down_write(&kvm->arch.apicv_update_lock); 11315 11316 kvm_for_each_vcpu(i, vcpu, kvm) { 11317 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) { 11318 set = true; 11319 break; 11320 } 11321 } 11322 __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set); 11323 up_write(&kvm->arch.apicv_update_lock); 11324 } 11325 11326 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 11327 struct kvm_guest_debug *dbg) 11328 { 11329 unsigned long rflags; 11330 int i, r; 11331 11332 if (vcpu->arch.guest_state_protected) 11333 return -EINVAL; 11334 11335 vcpu_load(vcpu); 11336 11337 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { 11338 r = -EBUSY; 11339 if (kvm_is_exception_pending(vcpu)) 11340 goto out; 11341 if (dbg->control & KVM_GUESTDBG_INJECT_DB) 11342 kvm_queue_exception(vcpu, DB_VECTOR); 11343 else 11344 kvm_queue_exception(vcpu, BP_VECTOR); 11345 } 11346 11347 /* 11348 * Read rflags as long as potentially injected trace flags are still 11349 * filtered out. 11350 */ 11351 rflags = kvm_get_rflags(vcpu); 11352 11353 vcpu->guest_debug = dbg->control; 11354 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) 11355 vcpu->guest_debug = 0; 11356 11357 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 11358 for (i = 0; i < KVM_NR_DB_REGS; ++i) 11359 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; 11360 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; 11361 } else { 11362 for (i = 0; i < KVM_NR_DB_REGS; i++) 11363 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 11364 } 11365 kvm_update_dr7(vcpu); 11366 11367 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 11368 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); 11369 11370 /* 11371 * Trigger an rflags update that will inject or remove the trace 11372 * flags. 11373 */ 11374 kvm_set_rflags(vcpu, rflags); 11375 11376 static_call(kvm_x86_update_exception_bitmap)(vcpu); 11377 11378 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm); 11379 11380 r = 0; 11381 11382 out: 11383 vcpu_put(vcpu); 11384 return r; 11385 } 11386 11387 /* 11388 * Translate a guest virtual address to a guest physical address. 11389 */ 11390 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 11391 struct kvm_translation *tr) 11392 { 11393 unsigned long vaddr = tr->linear_address; 11394 gpa_t gpa; 11395 int idx; 11396 11397 vcpu_load(vcpu); 11398 11399 idx = srcu_read_lock(&vcpu->kvm->srcu); 11400 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); 11401 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11402 tr->physical_address = gpa; 11403 tr->valid = gpa != INVALID_GPA; 11404 tr->writeable = 1; 11405 tr->usermode = 0; 11406 11407 vcpu_put(vcpu); 11408 return 0; 11409 } 11410 11411 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 11412 { 11413 struct fxregs_state *fxsave; 11414 11415 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 11416 return 0; 11417 11418 vcpu_load(vcpu); 11419 11420 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; 11421 memcpy(fpu->fpr, fxsave->st_space, 128); 11422 fpu->fcw = fxsave->cwd; 11423 fpu->fsw = fxsave->swd; 11424 fpu->ftwx = fxsave->twd; 11425 fpu->last_opcode = fxsave->fop; 11426 fpu->last_ip = fxsave->rip; 11427 fpu->last_dp = fxsave->rdp; 11428 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); 11429 11430 vcpu_put(vcpu); 11431 return 0; 11432 } 11433 11434 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 11435 { 11436 struct fxregs_state *fxsave; 11437 11438 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 11439 return 0; 11440 11441 vcpu_load(vcpu); 11442 11443 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; 11444 11445 memcpy(fxsave->st_space, fpu->fpr, 128); 11446 fxsave->cwd = fpu->fcw; 11447 fxsave->swd = fpu->fsw; 11448 fxsave->twd = fpu->ftwx; 11449 fxsave->fop = fpu->last_opcode; 11450 fxsave->rip = fpu->last_ip; 11451 fxsave->rdp = fpu->last_dp; 11452 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); 11453 11454 vcpu_put(vcpu); 11455 return 0; 11456 } 11457 11458 static void store_regs(struct kvm_vcpu *vcpu) 11459 { 11460 BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES); 11461 11462 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) 11463 __get_regs(vcpu, &vcpu->run->s.regs.regs); 11464 11465 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) 11466 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); 11467 11468 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) 11469 kvm_vcpu_ioctl_x86_get_vcpu_events( 11470 vcpu, &vcpu->run->s.regs.events); 11471 } 11472 11473 static int sync_regs(struct kvm_vcpu *vcpu) 11474 { 11475 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { 11476 __set_regs(vcpu, &vcpu->run->s.regs.regs); 11477 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; 11478 } 11479 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { 11480 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) 11481 return -EINVAL; 11482 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; 11483 } 11484 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { 11485 if (kvm_vcpu_ioctl_x86_set_vcpu_events( 11486 vcpu, &vcpu->run->s.regs.events)) 11487 return -EINVAL; 11488 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; 11489 } 11490 11491 return 0; 11492 } 11493 11494 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) 11495 { 11496 if (kvm_check_tsc_unstable() && kvm->created_vcpus) 11497 pr_warn_once("kvm: SMP vm created on host with unstable TSC; " 11498 "guest TSC will not be reliable\n"); 11499 11500 if (!kvm->arch.max_vcpu_ids) 11501 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS; 11502 11503 if (id >= kvm->arch.max_vcpu_ids) 11504 return -EINVAL; 11505 11506 return static_call(kvm_x86_vcpu_precreate)(kvm); 11507 } 11508 11509 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) 11510 { 11511 struct page *page; 11512 int r; 11513 11514 vcpu->arch.last_vmentry_cpu = -1; 11515 vcpu->arch.regs_avail = ~0; 11516 vcpu->arch.regs_dirty = ~0; 11517 11518 kvm_gpc_init(&vcpu->arch.pv_time); 11519 11520 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) 11521 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 11522 else 11523 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; 11524 11525 r = kvm_mmu_create(vcpu); 11526 if (r < 0) 11527 return r; 11528 11529 if (irqchip_in_kernel(vcpu->kvm)) { 11530 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); 11531 if (r < 0) 11532 goto fail_mmu_destroy; 11533 11534 /* 11535 * Defer evaluating inhibits until the vCPU is first run, as 11536 * this vCPU will not get notified of any changes until this 11537 * vCPU is visible to other vCPUs (marked online and added to 11538 * the set of vCPUs). Opportunistically mark APICv active as 11539 * VMX in particularly is highly unlikely to have inhibits. 11540 * Ignore the current per-VM APICv state so that vCPU creation 11541 * is guaranteed to run with a deterministic value, the request 11542 * will ensure the vCPU gets the correct state before VM-Entry. 11543 */ 11544 if (enable_apicv) { 11545 vcpu->arch.apic->apicv_active = true; 11546 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); 11547 } 11548 } else 11549 static_branch_inc(&kvm_has_noapic_vcpu); 11550 11551 r = -ENOMEM; 11552 11553 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 11554 if (!page) 11555 goto fail_free_lapic; 11556 vcpu->arch.pio_data = page_address(page); 11557 11558 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64), 11559 GFP_KERNEL_ACCOUNT); 11560 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64), 11561 GFP_KERNEL_ACCOUNT); 11562 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks) 11563 goto fail_free_mce_banks; 11564 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; 11565 11566 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, 11567 GFP_KERNEL_ACCOUNT)) 11568 goto fail_free_mce_banks; 11569 11570 if (!alloc_emulate_ctxt(vcpu)) 11571 goto free_wbinvd_dirty_mask; 11572 11573 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { 11574 pr_err("kvm: failed to allocate vcpu's fpu\n"); 11575 goto free_emulate_ctxt; 11576 } 11577 11578 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); 11579 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); 11580 11581 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; 11582 11583 kvm_async_pf_hash_reset(vcpu); 11584 11585 vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap; 11586 kvm_pmu_init(vcpu); 11587 11588 vcpu->arch.pending_external_vector = -1; 11589 vcpu->arch.preempted_in_kernel = false; 11590 11591 #if IS_ENABLED(CONFIG_HYPERV) 11592 vcpu->arch.hv_root_tdp = INVALID_PAGE; 11593 #endif 11594 11595 r = static_call(kvm_x86_vcpu_create)(vcpu); 11596 if (r) 11597 goto free_guest_fpu; 11598 11599 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); 11600 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; 11601 kvm_xen_init_vcpu(vcpu); 11602 kvm_vcpu_mtrr_init(vcpu); 11603 vcpu_load(vcpu); 11604 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz); 11605 kvm_vcpu_reset(vcpu, false); 11606 kvm_init_mmu(vcpu); 11607 vcpu_put(vcpu); 11608 return 0; 11609 11610 free_guest_fpu: 11611 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); 11612 free_emulate_ctxt: 11613 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 11614 free_wbinvd_dirty_mask: 11615 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 11616 fail_free_mce_banks: 11617 kfree(vcpu->arch.mce_banks); 11618 kfree(vcpu->arch.mci_ctl2_banks); 11619 free_page((unsigned long)vcpu->arch.pio_data); 11620 fail_free_lapic: 11621 kvm_free_lapic(vcpu); 11622 fail_mmu_destroy: 11623 kvm_mmu_destroy(vcpu); 11624 return r; 11625 } 11626 11627 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 11628 { 11629 struct kvm *kvm = vcpu->kvm; 11630 11631 if (mutex_lock_killable(&vcpu->mutex)) 11632 return; 11633 vcpu_load(vcpu); 11634 kvm_synchronize_tsc(vcpu, 0); 11635 vcpu_put(vcpu); 11636 11637 /* poll control enabled by default */ 11638 vcpu->arch.msr_kvm_poll_control = 1; 11639 11640 mutex_unlock(&vcpu->mutex); 11641 11642 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) 11643 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 11644 KVMCLOCK_SYNC_PERIOD); 11645 } 11646 11647 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 11648 { 11649 int idx; 11650 11651 kvmclock_reset(vcpu); 11652 11653 static_call(kvm_x86_vcpu_free)(vcpu); 11654 11655 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 11656 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 11657 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); 11658 11659 kvm_xen_destroy_vcpu(vcpu); 11660 kvm_hv_vcpu_uninit(vcpu); 11661 kvm_pmu_destroy(vcpu); 11662 kfree(vcpu->arch.mce_banks); 11663 kfree(vcpu->arch.mci_ctl2_banks); 11664 kvm_free_lapic(vcpu); 11665 idx = srcu_read_lock(&vcpu->kvm->srcu); 11666 kvm_mmu_destroy(vcpu); 11667 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11668 free_page((unsigned long)vcpu->arch.pio_data); 11669 kvfree(vcpu->arch.cpuid_entries); 11670 if (!lapic_in_kernel(vcpu)) 11671 static_branch_dec(&kvm_has_noapic_vcpu); 11672 } 11673 11674 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 11675 { 11676 struct kvm_cpuid_entry2 *cpuid_0x1; 11677 unsigned long old_cr0 = kvm_read_cr0(vcpu); 11678 unsigned long new_cr0; 11679 11680 /* 11681 * Several of the "set" flows, e.g. ->set_cr0(), read other registers 11682 * to handle side effects. RESET emulation hits those flows and relies 11683 * on emulated/virtualized registers, including those that are loaded 11684 * into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel 11685 * to detect improper or missing initialization. 11686 */ 11687 WARN_ON_ONCE(!init_event && 11688 (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu))); 11689 11690 kvm_lapic_reset(vcpu, init_event); 11691 11692 vcpu->arch.hflags = 0; 11693 11694 vcpu->arch.smi_pending = 0; 11695 vcpu->arch.smi_count = 0; 11696 atomic_set(&vcpu->arch.nmi_queued, 0); 11697 vcpu->arch.nmi_pending = 0; 11698 vcpu->arch.nmi_injected = false; 11699 kvm_clear_interrupt_queue(vcpu); 11700 kvm_clear_exception_queue(vcpu); 11701 11702 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); 11703 kvm_update_dr0123(vcpu); 11704 vcpu->arch.dr6 = DR6_ACTIVE_LOW; 11705 vcpu->arch.dr7 = DR7_FIXED_1; 11706 kvm_update_dr7(vcpu); 11707 11708 vcpu->arch.cr2 = 0; 11709 11710 kvm_make_request(KVM_REQ_EVENT, vcpu); 11711 vcpu->arch.apf.msr_en_val = 0; 11712 vcpu->arch.apf.msr_int_val = 0; 11713 vcpu->arch.st.msr_val = 0; 11714 11715 kvmclock_reset(vcpu); 11716 11717 kvm_clear_async_pf_completion_queue(vcpu); 11718 kvm_async_pf_hash_reset(vcpu); 11719 vcpu->arch.apf.halted = false; 11720 11721 if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) { 11722 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate; 11723 11724 /* 11725 * All paths that lead to INIT are required to load the guest's 11726 * FPU state (because most paths are buried in KVM_RUN). 11727 */ 11728 if (init_event) 11729 kvm_put_guest_fpu(vcpu); 11730 11731 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS); 11732 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR); 11733 11734 if (init_event) 11735 kvm_load_guest_fpu(vcpu); 11736 } 11737 11738 if (!init_event) { 11739 kvm_pmu_reset(vcpu); 11740 vcpu->arch.smbase = 0x30000; 11741 11742 vcpu->arch.msr_misc_features_enables = 0; 11743 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | 11744 MSR_IA32_MISC_ENABLE_BTS_UNAVAIL; 11745 11746 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP); 11747 __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true); 11748 } 11749 11750 /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */ 11751 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); 11752 kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP); 11753 11754 /* 11755 * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon) 11756 * if no CPUID match is found. Note, it's impossible to get a match at 11757 * RESET since KVM emulates RESET before exposing the vCPU to userspace, 11758 * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry 11759 * on RESET. But, go through the motions in case that's ever remedied. 11760 */ 11761 cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1); 11762 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); 11763 11764 static_call(kvm_x86_vcpu_reset)(vcpu, init_event); 11765 11766 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 11767 kvm_rip_write(vcpu, 0xfff0); 11768 11769 vcpu->arch.cr3 = 0; 11770 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 11771 11772 /* 11773 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions 11774 * of Intel's SDM list CD/NW as being set on INIT, but they contradict 11775 * (or qualify) that with a footnote stating that CD/NW are preserved. 11776 */ 11777 new_cr0 = X86_CR0_ET; 11778 if (init_event) 11779 new_cr0 |= (old_cr0 & (X86_CR0_NW | X86_CR0_CD)); 11780 else 11781 new_cr0 |= X86_CR0_NW | X86_CR0_CD; 11782 11783 static_call(kvm_x86_set_cr0)(vcpu, new_cr0); 11784 static_call(kvm_x86_set_cr4)(vcpu, 0); 11785 static_call(kvm_x86_set_efer)(vcpu, 0); 11786 static_call(kvm_x86_update_exception_bitmap)(vcpu); 11787 11788 /* 11789 * On the standard CR0/CR4/EFER modification paths, there are several 11790 * complex conditions determining whether the MMU has to be reset and/or 11791 * which PCIDs have to be flushed. However, CR0.WP and the paging-related 11792 * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush 11793 * is needed anyway if CR0.PG was '1' (which can only happen for INIT, as 11794 * CR0 will be '0' prior to RESET). So we only need to check CR0.PG here. 11795 */ 11796 if (old_cr0 & X86_CR0_PG) { 11797 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 11798 kvm_mmu_reset_context(vcpu); 11799 } 11800 11801 /* 11802 * Intel's SDM states that all TLB entries are flushed on INIT. AMD's 11803 * APM states the TLBs are untouched by INIT, but it also states that 11804 * the TLBs are flushed on "External initialization of the processor." 11805 * Flush the guest TLB regardless of vendor, there is no meaningful 11806 * benefit in relying on the guest to flush the TLB immediately after 11807 * INIT. A spurious TLB flush is benign and likely negligible from a 11808 * performance perspective. 11809 */ 11810 if (init_event) 11811 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 11812 } 11813 EXPORT_SYMBOL_GPL(kvm_vcpu_reset); 11814 11815 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 11816 { 11817 struct kvm_segment cs; 11818 11819 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 11820 cs.selector = vector << 8; 11821 cs.base = vector << 12; 11822 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 11823 kvm_rip_write(vcpu, 0); 11824 } 11825 EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector); 11826 11827 int kvm_arch_hardware_enable(void) 11828 { 11829 struct kvm *kvm; 11830 struct kvm_vcpu *vcpu; 11831 unsigned long i; 11832 int ret; 11833 u64 local_tsc; 11834 u64 max_tsc = 0; 11835 bool stable, backwards_tsc = false; 11836 11837 kvm_user_return_msr_cpu_online(); 11838 ret = static_call(kvm_x86_hardware_enable)(); 11839 if (ret != 0) 11840 return ret; 11841 11842 local_tsc = rdtsc(); 11843 stable = !kvm_check_tsc_unstable(); 11844 list_for_each_entry(kvm, &vm_list, vm_list) { 11845 kvm_for_each_vcpu(i, vcpu, kvm) { 11846 if (!stable && vcpu->cpu == smp_processor_id()) 11847 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 11848 if (stable && vcpu->arch.last_host_tsc > local_tsc) { 11849 backwards_tsc = true; 11850 if (vcpu->arch.last_host_tsc > max_tsc) 11851 max_tsc = vcpu->arch.last_host_tsc; 11852 } 11853 } 11854 } 11855 11856 /* 11857 * Sometimes, even reliable TSCs go backwards. This happens on 11858 * platforms that reset TSC during suspend or hibernate actions, but 11859 * maintain synchronization. We must compensate. Fortunately, we can 11860 * detect that condition here, which happens early in CPU bringup, 11861 * before any KVM threads can be running. Unfortunately, we can't 11862 * bring the TSCs fully up to date with real time, as we aren't yet far 11863 * enough into CPU bringup that we know how much real time has actually 11864 * elapsed; our helper function, ktime_get_boottime_ns() will be using boot 11865 * variables that haven't been updated yet. 11866 * 11867 * So we simply find the maximum observed TSC above, then record the 11868 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, 11869 * the adjustment will be applied. Note that we accumulate 11870 * adjustments, in case multiple suspend cycles happen before some VCPU 11871 * gets a chance to run again. In the event that no KVM threads get a 11872 * chance to run, we will miss the entire elapsed period, as we'll have 11873 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may 11874 * loose cycle time. This isn't too big a deal, since the loss will be 11875 * uniform across all VCPUs (not to mention the scenario is extremely 11876 * unlikely). It is possible that a second hibernate recovery happens 11877 * much faster than a first, causing the observed TSC here to be 11878 * smaller; this would require additional padding adjustment, which is 11879 * why we set last_host_tsc to the local tsc observed here. 11880 * 11881 * N.B. - this code below runs only on platforms with reliable TSC, 11882 * as that is the only way backwards_tsc is set above. Also note 11883 * that this runs for ALL vcpus, which is not a bug; all VCPUs should 11884 * have the same delta_cyc adjustment applied if backwards_tsc 11885 * is detected. Note further, this adjustment is only done once, 11886 * as we reset last_host_tsc on all VCPUs to stop this from being 11887 * called multiple times (one for each physical CPU bringup). 11888 * 11889 * Platforms with unreliable TSCs don't have to deal with this, they 11890 * will be compensated by the logic in vcpu_load, which sets the TSC to 11891 * catchup mode. This will catchup all VCPUs to real time, but cannot 11892 * guarantee that they stay in perfect synchronization. 11893 */ 11894 if (backwards_tsc) { 11895 u64 delta_cyc = max_tsc - local_tsc; 11896 list_for_each_entry(kvm, &vm_list, vm_list) { 11897 kvm->arch.backwards_tsc_observed = true; 11898 kvm_for_each_vcpu(i, vcpu, kvm) { 11899 vcpu->arch.tsc_offset_adjustment += delta_cyc; 11900 vcpu->arch.last_host_tsc = local_tsc; 11901 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 11902 } 11903 11904 /* 11905 * We have to disable TSC offset matching.. if you were 11906 * booting a VM while issuing an S4 host suspend.... 11907 * you may have some problem. Solving this issue is 11908 * left as an exercise to the reader. 11909 */ 11910 kvm->arch.last_tsc_nsec = 0; 11911 kvm->arch.last_tsc_write = 0; 11912 } 11913 11914 } 11915 return 0; 11916 } 11917 11918 void kvm_arch_hardware_disable(void) 11919 { 11920 static_call(kvm_x86_hardware_disable)(); 11921 drop_user_return_notifiers(); 11922 } 11923 11924 static inline void kvm_ops_update(struct kvm_x86_init_ops *ops) 11925 { 11926 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); 11927 11928 #define __KVM_X86_OP(func) \ 11929 static_call_update(kvm_x86_##func, kvm_x86_ops.func); 11930 #define KVM_X86_OP(func) \ 11931 WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func) 11932 #define KVM_X86_OP_OPTIONAL __KVM_X86_OP 11933 #define KVM_X86_OP_OPTIONAL_RET0(func) \ 11934 static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \ 11935 (void *)__static_call_return0); 11936 #include <asm/kvm-x86-ops.h> 11937 #undef __KVM_X86_OP 11938 11939 kvm_pmu_ops_update(ops->pmu_ops); 11940 } 11941 11942 int kvm_arch_hardware_setup(void *opaque) 11943 { 11944 struct kvm_x86_init_ops *ops = opaque; 11945 int r; 11946 11947 rdmsrl_safe(MSR_EFER, &host_efer); 11948 11949 if (boot_cpu_has(X86_FEATURE_XSAVES)) 11950 rdmsrl(MSR_IA32_XSS, host_xss); 11951 11952 kvm_init_pmu_capability(); 11953 11954 r = ops->hardware_setup(); 11955 if (r != 0) 11956 return r; 11957 11958 kvm_ops_update(ops); 11959 11960 kvm_register_perf_callbacks(ops->handle_intel_pt_intr); 11961 11962 if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) 11963 kvm_caps.supported_xss = 0; 11964 11965 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f) 11966 cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_); 11967 #undef __kvm_cpu_cap_has 11968 11969 if (kvm_caps.has_tsc_control) { 11970 /* 11971 * Make sure the user can only configure tsc_khz values that 11972 * fit into a signed integer. 11973 * A min value is not calculated because it will always 11974 * be 1 on all machines. 11975 */ 11976 u64 max = min(0x7fffffffULL, 11977 __scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz)); 11978 kvm_caps.max_guest_tsc_khz = max; 11979 } 11980 kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits; 11981 kvm_init_msr_list(); 11982 return 0; 11983 } 11984 11985 void kvm_arch_hardware_unsetup(void) 11986 { 11987 kvm_unregister_perf_callbacks(); 11988 11989 static_call(kvm_x86_hardware_unsetup)(); 11990 } 11991 11992 int kvm_arch_check_processor_compat(void *opaque) 11993 { 11994 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); 11995 struct kvm_x86_init_ops *ops = opaque; 11996 11997 WARN_ON(!irqs_disabled()); 11998 11999 if (__cr4_reserved_bits(cpu_has, c) != 12000 __cr4_reserved_bits(cpu_has, &boot_cpu_data)) 12001 return -EIO; 12002 12003 return ops->check_processor_compatibility(); 12004 } 12005 12006 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) 12007 { 12008 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; 12009 } 12010 EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp); 12011 12012 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) 12013 { 12014 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; 12015 } 12016 12017 __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); 12018 EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); 12019 12020 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) 12021 { 12022 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 12023 12024 vcpu->arch.l1tf_flush_l1d = true; 12025 if (pmu->version && unlikely(pmu->event_count)) { 12026 pmu->need_cleanup = true; 12027 kvm_make_request(KVM_REQ_PMU, vcpu); 12028 } 12029 static_call(kvm_x86_sched_in)(vcpu, cpu); 12030 } 12031 12032 void kvm_arch_free_vm(struct kvm *kvm) 12033 { 12034 kfree(to_kvm_hv(kvm)->hv_pa_pg); 12035 __kvm_arch_free_vm(kvm); 12036 } 12037 12038 12039 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 12040 { 12041 int ret; 12042 unsigned long flags; 12043 12044 if (type) 12045 return -EINVAL; 12046 12047 ret = kvm_page_track_init(kvm); 12048 if (ret) 12049 goto out; 12050 12051 ret = kvm_mmu_init_vm(kvm); 12052 if (ret) 12053 goto out_page_track; 12054 12055 ret = static_call(kvm_x86_vm_init)(kvm); 12056 if (ret) 12057 goto out_uninit_mmu; 12058 12059 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); 12060 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 12061 atomic_set(&kvm->arch.noncoherent_dma_count, 0); 12062 12063 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 12064 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); 12065 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ 12066 set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, 12067 &kvm->arch.irq_sources_bitmap); 12068 12069 raw_spin_lock_init(&kvm->arch.tsc_write_lock); 12070 mutex_init(&kvm->arch.apic_map_lock); 12071 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock); 12072 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); 12073 12074 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 12075 pvclock_update_vm_gtod_copy(kvm); 12076 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 12077 12078 kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz; 12079 kvm->arch.guest_can_read_msr_platform_info = true; 12080 kvm->arch.enable_pmu = enable_pmu; 12081 12082 #if IS_ENABLED(CONFIG_HYPERV) 12083 spin_lock_init(&kvm->arch.hv_root_tdp_lock); 12084 kvm->arch.hv_root_tdp = INVALID_PAGE; 12085 #endif 12086 12087 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); 12088 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); 12089 12090 kvm_apicv_init(kvm); 12091 kvm_hv_init_vm(kvm); 12092 kvm_xen_init_vm(kvm); 12093 12094 return 0; 12095 12096 out_uninit_mmu: 12097 kvm_mmu_uninit_vm(kvm); 12098 out_page_track: 12099 kvm_page_track_cleanup(kvm); 12100 out: 12101 return ret; 12102 } 12103 12104 int kvm_arch_post_init_vm(struct kvm *kvm) 12105 { 12106 return kvm_mmu_post_init_vm(kvm); 12107 } 12108 12109 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) 12110 { 12111 vcpu_load(vcpu); 12112 kvm_mmu_unload(vcpu); 12113 vcpu_put(vcpu); 12114 } 12115 12116 static void kvm_unload_vcpu_mmus(struct kvm *kvm) 12117 { 12118 unsigned long i; 12119 struct kvm_vcpu *vcpu; 12120 12121 kvm_for_each_vcpu(i, vcpu, kvm) { 12122 kvm_clear_async_pf_completion_queue(vcpu); 12123 kvm_unload_vcpu_mmu(vcpu); 12124 } 12125 } 12126 12127 void kvm_arch_sync_events(struct kvm *kvm) 12128 { 12129 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); 12130 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); 12131 kvm_free_pit(kvm); 12132 } 12133 12134 /** 12135 * __x86_set_memory_region: Setup KVM internal memory slot 12136 * 12137 * @kvm: the kvm pointer to the VM. 12138 * @id: the slot ID to setup. 12139 * @gpa: the GPA to install the slot (unused when @size == 0). 12140 * @size: the size of the slot. Set to zero to uninstall a slot. 12141 * 12142 * This function helps to setup a KVM internal memory slot. Specify 12143 * @size > 0 to install a new slot, while @size == 0 to uninstall a 12144 * slot. The return code can be one of the following: 12145 * 12146 * HVA: on success (uninstall will return a bogus HVA) 12147 * -errno: on error 12148 * 12149 * The caller should always use IS_ERR() to check the return value 12150 * before use. Note, the KVM internal memory slots are guaranteed to 12151 * remain valid and unchanged until the VM is destroyed, i.e., the 12152 * GPA->HVA translation will not change. However, the HVA is a user 12153 * address, i.e. its accessibility is not guaranteed, and must be 12154 * accessed via __copy_{to,from}_user(). 12155 */ 12156 void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, 12157 u32 size) 12158 { 12159 int i, r; 12160 unsigned long hva, old_npages; 12161 struct kvm_memslots *slots = kvm_memslots(kvm); 12162 struct kvm_memory_slot *slot; 12163 12164 /* Called with kvm->slots_lock held. */ 12165 if (WARN_ON(id >= KVM_MEM_SLOTS_NUM)) 12166 return ERR_PTR_USR(-EINVAL); 12167 12168 slot = id_to_memslot(slots, id); 12169 if (size) { 12170 if (slot && slot->npages) 12171 return ERR_PTR_USR(-EEXIST); 12172 12173 /* 12174 * MAP_SHARED to prevent internal slot pages from being moved 12175 * by fork()/COW. 12176 */ 12177 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, 12178 MAP_SHARED | MAP_ANONYMOUS, 0); 12179 if (IS_ERR((void *)hva)) 12180 return (void __user *)hva; 12181 } else { 12182 if (!slot || !slot->npages) 12183 return NULL; 12184 12185 old_npages = slot->npages; 12186 hva = slot->userspace_addr; 12187 } 12188 12189 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 12190 struct kvm_userspace_memory_region m; 12191 12192 m.slot = id | (i << 16); 12193 m.flags = 0; 12194 m.guest_phys_addr = gpa; 12195 m.userspace_addr = hva; 12196 m.memory_size = size; 12197 r = __kvm_set_memory_region(kvm, &m); 12198 if (r < 0) 12199 return ERR_PTR_USR(r); 12200 } 12201 12202 if (!size) 12203 vm_munmap(hva, old_npages * PAGE_SIZE); 12204 12205 return (void __user *)hva; 12206 } 12207 EXPORT_SYMBOL_GPL(__x86_set_memory_region); 12208 12209 void kvm_arch_pre_destroy_vm(struct kvm *kvm) 12210 { 12211 kvm_mmu_pre_destroy_vm(kvm); 12212 } 12213 12214 void kvm_arch_destroy_vm(struct kvm *kvm) 12215 { 12216 if (current->mm == kvm->mm) { 12217 /* 12218 * Free memory regions allocated on behalf of userspace, 12219 * unless the memory map has changed due to process exit 12220 * or fd copying. 12221 */ 12222 mutex_lock(&kvm->slots_lock); 12223 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 12224 0, 0); 12225 __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 12226 0, 0); 12227 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); 12228 mutex_unlock(&kvm->slots_lock); 12229 } 12230 kvm_unload_vcpu_mmus(kvm); 12231 static_call_cond(kvm_x86_vm_destroy)(kvm); 12232 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); 12233 kvm_pic_destroy(kvm); 12234 kvm_ioapic_destroy(kvm); 12235 kvm_destroy_vcpus(kvm); 12236 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 12237 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); 12238 kvm_mmu_uninit_vm(kvm); 12239 kvm_page_track_cleanup(kvm); 12240 kvm_xen_destroy_vm(kvm); 12241 kvm_hv_destroy_vm(kvm); 12242 } 12243 12244 static void memslot_rmap_free(struct kvm_memory_slot *slot) 12245 { 12246 int i; 12247 12248 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 12249 kvfree(slot->arch.rmap[i]); 12250 slot->arch.rmap[i] = NULL; 12251 } 12252 } 12253 12254 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 12255 { 12256 int i; 12257 12258 memslot_rmap_free(slot); 12259 12260 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12261 kvfree(slot->arch.lpage_info[i - 1]); 12262 slot->arch.lpage_info[i - 1] = NULL; 12263 } 12264 12265 kvm_page_track_free_memslot(slot); 12266 } 12267 12268 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages) 12269 { 12270 const int sz = sizeof(*slot->arch.rmap[0]); 12271 int i; 12272 12273 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 12274 int level = i + 1; 12275 int lpages = __kvm_mmu_slot_lpages(slot, npages, level); 12276 12277 if (slot->arch.rmap[i]) 12278 continue; 12279 12280 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT); 12281 if (!slot->arch.rmap[i]) { 12282 memslot_rmap_free(slot); 12283 return -ENOMEM; 12284 } 12285 } 12286 12287 return 0; 12288 } 12289 12290 static int kvm_alloc_memslot_metadata(struct kvm *kvm, 12291 struct kvm_memory_slot *slot) 12292 { 12293 unsigned long npages = slot->npages; 12294 int i, r; 12295 12296 /* 12297 * Clear out the previous array pointers for the KVM_MR_MOVE case. The 12298 * old arrays will be freed by __kvm_set_memory_region() if installing 12299 * the new memslot is successful. 12300 */ 12301 memset(&slot->arch, 0, sizeof(slot->arch)); 12302 12303 if (kvm_memslots_have_rmaps(kvm)) { 12304 r = memslot_rmap_alloc(slot, npages); 12305 if (r) 12306 return r; 12307 } 12308 12309 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12310 struct kvm_lpage_info *linfo; 12311 unsigned long ugfn; 12312 int lpages; 12313 int level = i + 1; 12314 12315 lpages = __kvm_mmu_slot_lpages(slot, npages, level); 12316 12317 linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT); 12318 if (!linfo) 12319 goto out_free; 12320 12321 slot->arch.lpage_info[i - 1] = linfo; 12322 12323 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 12324 linfo[0].disallow_lpage = 1; 12325 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 12326 linfo[lpages - 1].disallow_lpage = 1; 12327 ugfn = slot->userspace_addr >> PAGE_SHIFT; 12328 /* 12329 * If the gfn and userspace address are not aligned wrt each 12330 * other, disable large page support for this slot. 12331 */ 12332 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { 12333 unsigned long j; 12334 12335 for (j = 0; j < lpages; ++j) 12336 linfo[j].disallow_lpage = 1; 12337 } 12338 } 12339 12340 if (kvm_page_track_create_memslot(kvm, slot, npages)) 12341 goto out_free; 12342 12343 return 0; 12344 12345 out_free: 12346 memslot_rmap_free(slot); 12347 12348 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12349 kvfree(slot->arch.lpage_info[i - 1]); 12350 slot->arch.lpage_info[i - 1] = NULL; 12351 } 12352 return -ENOMEM; 12353 } 12354 12355 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) 12356 { 12357 struct kvm_vcpu *vcpu; 12358 unsigned long i; 12359 12360 /* 12361 * memslots->generation has been incremented. 12362 * mmio generation may have reached its maximum value. 12363 */ 12364 kvm_mmu_invalidate_mmio_sptes(kvm, gen); 12365 12366 /* Force re-initialization of steal_time cache */ 12367 kvm_for_each_vcpu(i, vcpu, kvm) 12368 kvm_vcpu_kick(vcpu); 12369 } 12370 12371 int kvm_arch_prepare_memory_region(struct kvm *kvm, 12372 const struct kvm_memory_slot *old, 12373 struct kvm_memory_slot *new, 12374 enum kvm_mr_change change) 12375 { 12376 if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) { 12377 if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn()) 12378 return -EINVAL; 12379 12380 return kvm_alloc_memslot_metadata(kvm, new); 12381 } 12382 12383 if (change == KVM_MR_FLAGS_ONLY) 12384 memcpy(&new->arch, &old->arch, sizeof(old->arch)); 12385 else if (WARN_ON_ONCE(change != KVM_MR_DELETE)) 12386 return -EIO; 12387 12388 return 0; 12389 } 12390 12391 12392 static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable) 12393 { 12394 struct kvm_arch *ka = &kvm->arch; 12395 12396 if (!kvm_x86_ops.cpu_dirty_log_size) 12397 return; 12398 12399 if ((enable && ++ka->cpu_dirty_logging_count == 1) || 12400 (!enable && --ka->cpu_dirty_logging_count == 0)) 12401 kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING); 12402 12403 WARN_ON_ONCE(ka->cpu_dirty_logging_count < 0); 12404 } 12405 12406 static void kvm_mmu_slot_apply_flags(struct kvm *kvm, 12407 struct kvm_memory_slot *old, 12408 const struct kvm_memory_slot *new, 12409 enum kvm_mr_change change) 12410 { 12411 u32 old_flags = old ? old->flags : 0; 12412 u32 new_flags = new ? new->flags : 0; 12413 bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES; 12414 12415 /* 12416 * Update CPU dirty logging if dirty logging is being toggled. This 12417 * applies to all operations. 12418 */ 12419 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) 12420 kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages); 12421 12422 /* 12423 * Nothing more to do for RO slots (which can't be dirtied and can't be 12424 * made writable) or CREATE/MOVE/DELETE of a slot. 12425 * 12426 * For a memslot with dirty logging disabled: 12427 * CREATE: No dirty mappings will already exist. 12428 * MOVE/DELETE: The old mappings will already have been cleaned up by 12429 * kvm_arch_flush_shadow_memslot() 12430 * 12431 * For a memslot with dirty logging enabled: 12432 * CREATE: No shadow pages exist, thus nothing to write-protect 12433 * and no dirty bits to clear. 12434 * MOVE/DELETE: The old mappings will already have been cleaned up by 12435 * kvm_arch_flush_shadow_memslot(). 12436 */ 12437 if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY)) 12438 return; 12439 12440 /* 12441 * READONLY and non-flags changes were filtered out above, and the only 12442 * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty 12443 * logging isn't being toggled on or off. 12444 */ 12445 if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES))) 12446 return; 12447 12448 if (!log_dirty_pages) { 12449 /* 12450 * Dirty logging tracks sptes in 4k granularity, meaning that 12451 * large sptes have to be split. If live migration succeeds, 12452 * the guest in the source machine will be destroyed and large 12453 * sptes will be created in the destination. However, if the 12454 * guest continues to run in the source machine (for example if 12455 * live migration fails), small sptes will remain around and 12456 * cause bad performance. 12457 * 12458 * Scan sptes if dirty logging has been stopped, dropping those 12459 * which can be collapsed into a single large-page spte. Later 12460 * page faults will create the large-page sptes. 12461 */ 12462 kvm_mmu_zap_collapsible_sptes(kvm, new); 12463 } else { 12464 /* 12465 * Initially-all-set does not require write protecting any page, 12466 * because they're all assumed to be dirty. 12467 */ 12468 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 12469 return; 12470 12471 if (READ_ONCE(eager_page_split)) 12472 kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K); 12473 12474 if (kvm_x86_ops.cpu_dirty_log_size) { 12475 kvm_mmu_slot_leaf_clear_dirty(kvm, new); 12476 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M); 12477 } else { 12478 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K); 12479 } 12480 12481 /* 12482 * Unconditionally flush the TLBs after enabling dirty logging. 12483 * A flush is almost always going to be necessary (see below), 12484 * and unconditionally flushing allows the helpers to omit 12485 * the subtly complex checks when removing write access. 12486 * 12487 * Do the flush outside of mmu_lock to reduce the amount of 12488 * time mmu_lock is held. Flushing after dropping mmu_lock is 12489 * safe as KVM only needs to guarantee the slot is fully 12490 * write-protected before returning to userspace, i.e. before 12491 * userspace can consume the dirty status. 12492 * 12493 * Flushing outside of mmu_lock requires KVM to be careful when 12494 * making decisions based on writable status of an SPTE, e.g. a 12495 * !writable SPTE doesn't guarantee a CPU can't perform writes. 12496 * 12497 * Specifically, KVM also write-protects guest page tables to 12498 * monitor changes when using shadow paging, and must guarantee 12499 * no CPUs can write to those page before mmu_lock is dropped. 12500 * Because CPUs may have stale TLB entries at this point, a 12501 * !writable SPTE doesn't guarantee CPUs can't perform writes. 12502 * 12503 * KVM also allows making SPTES writable outside of mmu_lock, 12504 * e.g. to allow dirty logging without taking mmu_lock. 12505 * 12506 * To handle these scenarios, KVM uses a separate software-only 12507 * bit (MMU-writable) to track if a SPTE is !writable due to 12508 * a guest page table being write-protected (KVM clears the 12509 * MMU-writable flag when write-protecting for shadow paging). 12510 * 12511 * The use of MMU-writable is also the primary motivation for 12512 * the unconditional flush. Because KVM must guarantee that a 12513 * CPU doesn't contain stale, writable TLB entries for a 12514 * !MMU-writable SPTE, KVM must flush if it encounters any 12515 * MMU-writable SPTE regardless of whether the actual hardware 12516 * writable bit was set. I.e. KVM is almost guaranteed to need 12517 * to flush, while unconditionally flushing allows the "remove 12518 * write access" helpers to ignore MMU-writable entirely. 12519 * 12520 * See is_writable_pte() for more details (the case involving 12521 * access-tracked SPTEs is particularly relevant). 12522 */ 12523 kvm_arch_flush_remote_tlbs_memslot(kvm, new); 12524 } 12525 } 12526 12527 void kvm_arch_commit_memory_region(struct kvm *kvm, 12528 struct kvm_memory_slot *old, 12529 const struct kvm_memory_slot *new, 12530 enum kvm_mr_change change) 12531 { 12532 if (!kvm->arch.n_requested_mmu_pages && 12533 (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) { 12534 unsigned long nr_mmu_pages; 12535 12536 nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO; 12537 nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); 12538 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); 12539 } 12540 12541 kvm_mmu_slot_apply_flags(kvm, old, new, change); 12542 12543 /* Free the arrays associated with the old memslot. */ 12544 if (change == KVM_MR_MOVE) 12545 kvm_arch_free_memslot(kvm, old); 12546 } 12547 12548 void kvm_arch_flush_shadow_all(struct kvm *kvm) 12549 { 12550 kvm_mmu_zap_all(kvm); 12551 } 12552 12553 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 12554 struct kvm_memory_slot *slot) 12555 { 12556 kvm_page_track_flush_slot(kvm, slot); 12557 } 12558 12559 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) 12560 { 12561 return (is_guest_mode(vcpu) && 12562 static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); 12563 } 12564 12565 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) 12566 { 12567 if (!list_empty_careful(&vcpu->async_pf.done)) 12568 return true; 12569 12570 if (kvm_apic_has_pending_init_or_sipi(vcpu) && 12571 kvm_apic_init_sipi_allowed(vcpu)) 12572 return true; 12573 12574 if (vcpu->arch.pv.pv_unhalted) 12575 return true; 12576 12577 if (kvm_is_exception_pending(vcpu)) 12578 return true; 12579 12580 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 12581 (vcpu->arch.nmi_pending && 12582 static_call(kvm_x86_nmi_allowed)(vcpu, false))) 12583 return true; 12584 12585 #ifdef CONFIG_KVM_SMM 12586 if (kvm_test_request(KVM_REQ_SMI, vcpu) || 12587 (vcpu->arch.smi_pending && 12588 static_call(kvm_x86_smi_allowed)(vcpu, false))) 12589 return true; 12590 #endif 12591 12592 if (kvm_arch_interrupt_allowed(vcpu) && 12593 (kvm_cpu_has_interrupt(vcpu) || 12594 kvm_guest_apic_has_interrupt(vcpu))) 12595 return true; 12596 12597 if (kvm_hv_has_stimer_pending(vcpu)) 12598 return true; 12599 12600 if (is_guest_mode(vcpu) && 12601 kvm_x86_ops.nested_ops->has_events && 12602 kvm_x86_ops.nested_ops->has_events(vcpu)) 12603 return true; 12604 12605 if (kvm_xen_has_pending_events(vcpu)) 12606 return true; 12607 12608 return false; 12609 } 12610 12611 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 12612 { 12613 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); 12614 } 12615 12616 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 12617 { 12618 if (kvm_vcpu_apicv_active(vcpu) && 12619 static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) 12620 return true; 12621 12622 return false; 12623 } 12624 12625 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 12626 { 12627 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) 12628 return true; 12629 12630 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 12631 kvm_test_request(KVM_REQ_SMI, vcpu) || 12632 kvm_test_request(KVM_REQ_EVENT, vcpu)) 12633 return true; 12634 12635 return kvm_arch_dy_has_pending_interrupt(vcpu); 12636 } 12637 12638 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) 12639 { 12640 if (vcpu->arch.guest_state_protected) 12641 return true; 12642 12643 return vcpu->arch.preempted_in_kernel; 12644 } 12645 12646 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) 12647 { 12648 return kvm_rip_read(vcpu); 12649 } 12650 12651 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 12652 { 12653 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; 12654 } 12655 12656 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) 12657 { 12658 return static_call(kvm_x86_interrupt_allowed)(vcpu, false); 12659 } 12660 12661 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) 12662 { 12663 /* Can't read the RIP when guest state is protected, just return 0 */ 12664 if (vcpu->arch.guest_state_protected) 12665 return 0; 12666 12667 if (is_64_bit_mode(vcpu)) 12668 return kvm_rip_read(vcpu); 12669 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + 12670 kvm_rip_read(vcpu)); 12671 } 12672 EXPORT_SYMBOL_GPL(kvm_get_linear_rip); 12673 12674 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) 12675 { 12676 return kvm_get_linear_rip(vcpu) == linear_rip; 12677 } 12678 EXPORT_SYMBOL_GPL(kvm_is_linear_rip); 12679 12680 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) 12681 { 12682 unsigned long rflags; 12683 12684 rflags = static_call(kvm_x86_get_rflags)(vcpu); 12685 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 12686 rflags &= ~X86_EFLAGS_TF; 12687 return rflags; 12688 } 12689 EXPORT_SYMBOL_GPL(kvm_get_rflags); 12690 12691 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 12692 { 12693 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && 12694 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) 12695 rflags |= X86_EFLAGS_TF; 12696 static_call(kvm_x86_set_rflags)(vcpu, rflags); 12697 } 12698 12699 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 12700 { 12701 __kvm_set_rflags(vcpu, rflags); 12702 kvm_make_request(KVM_REQ_EVENT, vcpu); 12703 } 12704 EXPORT_SYMBOL_GPL(kvm_set_rflags); 12705 12706 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) 12707 { 12708 BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU)); 12709 12710 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); 12711 } 12712 12713 static inline u32 kvm_async_pf_next_probe(u32 key) 12714 { 12715 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); 12716 } 12717 12718 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12719 { 12720 u32 key = kvm_async_pf_hash_fn(gfn); 12721 12722 while (vcpu->arch.apf.gfns[key] != ~0) 12723 key = kvm_async_pf_next_probe(key); 12724 12725 vcpu->arch.apf.gfns[key] = gfn; 12726 } 12727 12728 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) 12729 { 12730 int i; 12731 u32 key = kvm_async_pf_hash_fn(gfn); 12732 12733 for (i = 0; i < ASYNC_PF_PER_VCPU && 12734 (vcpu->arch.apf.gfns[key] != gfn && 12735 vcpu->arch.apf.gfns[key] != ~0); i++) 12736 key = kvm_async_pf_next_probe(key); 12737 12738 return key; 12739 } 12740 12741 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12742 { 12743 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; 12744 } 12745 12746 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12747 { 12748 u32 i, j, k; 12749 12750 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); 12751 12752 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) 12753 return; 12754 12755 while (true) { 12756 vcpu->arch.apf.gfns[i] = ~0; 12757 do { 12758 j = kvm_async_pf_next_probe(j); 12759 if (vcpu->arch.apf.gfns[j] == ~0) 12760 return; 12761 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); 12762 /* 12763 * k lies cyclically in ]i,j] 12764 * | i.k.j | 12765 * |....j i.k.| or |.k..j i...| 12766 */ 12767 } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j)); 12768 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; 12769 i = j; 12770 } 12771 } 12772 12773 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu) 12774 { 12775 u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT; 12776 12777 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, 12778 sizeof(reason)); 12779 } 12780 12781 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token) 12782 { 12783 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token); 12784 12785 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, 12786 &token, offset, sizeof(token)); 12787 } 12788 12789 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu) 12790 { 12791 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token); 12792 u32 val; 12793 12794 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, 12795 &val, offset, sizeof(val))) 12796 return false; 12797 12798 return !val; 12799 } 12800 12801 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) 12802 { 12803 12804 if (!kvm_pv_async_pf_enabled(vcpu)) 12805 return false; 12806 12807 if (vcpu->arch.apf.send_user_only && 12808 static_call(kvm_x86_get_cpl)(vcpu) == 0) 12809 return false; 12810 12811 if (is_guest_mode(vcpu)) { 12812 /* 12813 * L1 needs to opt into the special #PF vmexits that are 12814 * used to deliver async page faults. 12815 */ 12816 return vcpu->arch.apf.delivery_as_pf_vmexit; 12817 } else { 12818 /* 12819 * Play it safe in case the guest temporarily disables paging. 12820 * The real mode IDT in particular is unlikely to have a #PF 12821 * exception setup. 12822 */ 12823 return is_paging(vcpu); 12824 } 12825 } 12826 12827 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) 12828 { 12829 if (unlikely(!lapic_in_kernel(vcpu) || 12830 kvm_event_needs_reinjection(vcpu) || 12831 kvm_is_exception_pending(vcpu))) 12832 return false; 12833 12834 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) 12835 return false; 12836 12837 /* 12838 * If interrupts are off we cannot even use an artificial 12839 * halt state. 12840 */ 12841 return kvm_arch_interrupt_allowed(vcpu); 12842 } 12843 12844 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 12845 struct kvm_async_pf *work) 12846 { 12847 struct x86_exception fault; 12848 12849 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); 12850 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); 12851 12852 if (kvm_can_deliver_async_pf(vcpu) && 12853 !apf_put_user_notpresent(vcpu)) { 12854 fault.vector = PF_VECTOR; 12855 fault.error_code_valid = true; 12856 fault.error_code = 0; 12857 fault.nested_page_fault = false; 12858 fault.address = work->arch.token; 12859 fault.async_page_fault = true; 12860 kvm_inject_page_fault(vcpu, &fault); 12861 return true; 12862 } else { 12863 /* 12864 * It is not possible to deliver a paravirtualized asynchronous 12865 * page fault, but putting the guest in an artificial halt state 12866 * can be beneficial nevertheless: if an interrupt arrives, we 12867 * can deliver it timely and perhaps the guest will schedule 12868 * another process. When the instruction that triggered a page 12869 * fault is retried, hopefully the page will be ready in the host. 12870 */ 12871 kvm_make_request(KVM_REQ_APF_HALT, vcpu); 12872 return false; 12873 } 12874 } 12875 12876 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 12877 struct kvm_async_pf *work) 12878 { 12879 struct kvm_lapic_irq irq = { 12880 .delivery_mode = APIC_DM_FIXED, 12881 .vector = vcpu->arch.apf.vec 12882 }; 12883 12884 if (work->wakeup_all) 12885 work->arch.token = ~0; /* broadcast wakeup */ 12886 else 12887 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); 12888 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); 12889 12890 if ((work->wakeup_all || work->notpresent_injected) && 12891 kvm_pv_async_pf_enabled(vcpu) && 12892 !apf_put_user_ready(vcpu, work->arch.token)) { 12893 vcpu->arch.apf.pageready_pending = true; 12894 kvm_apic_set_irq(vcpu, &irq, NULL); 12895 } 12896 12897 vcpu->arch.apf.halted = false; 12898 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 12899 } 12900 12901 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) 12902 { 12903 kvm_make_request(KVM_REQ_APF_READY, vcpu); 12904 if (!vcpu->arch.apf.pageready_pending) 12905 kvm_vcpu_kick(vcpu); 12906 } 12907 12908 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) 12909 { 12910 if (!kvm_pv_async_pf_enabled(vcpu)) 12911 return true; 12912 else 12913 return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); 12914 } 12915 12916 void kvm_arch_start_assignment(struct kvm *kvm) 12917 { 12918 if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1) 12919 static_call_cond(kvm_x86_pi_start_assignment)(kvm); 12920 } 12921 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment); 12922 12923 void kvm_arch_end_assignment(struct kvm *kvm) 12924 { 12925 atomic_dec(&kvm->arch.assigned_device_count); 12926 } 12927 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment); 12928 12929 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm) 12930 { 12931 return arch_atomic_read(&kvm->arch.assigned_device_count); 12932 } 12933 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); 12934 12935 void kvm_arch_register_noncoherent_dma(struct kvm *kvm) 12936 { 12937 atomic_inc(&kvm->arch.noncoherent_dma_count); 12938 } 12939 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma); 12940 12941 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) 12942 { 12943 atomic_dec(&kvm->arch.noncoherent_dma_count); 12944 } 12945 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma); 12946 12947 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) 12948 { 12949 return atomic_read(&kvm->arch.noncoherent_dma_count); 12950 } 12951 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma); 12952 12953 bool kvm_arch_has_irq_bypass(void) 12954 { 12955 return true; 12956 } 12957 12958 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, 12959 struct irq_bypass_producer *prod) 12960 { 12961 struct kvm_kernel_irqfd *irqfd = 12962 container_of(cons, struct kvm_kernel_irqfd, consumer); 12963 int ret; 12964 12965 irqfd->producer = prod; 12966 kvm_arch_start_assignment(irqfd->kvm); 12967 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, 12968 prod->irq, irqfd->gsi, 1); 12969 12970 if (ret) 12971 kvm_arch_end_assignment(irqfd->kvm); 12972 12973 return ret; 12974 } 12975 12976 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, 12977 struct irq_bypass_producer *prod) 12978 { 12979 int ret; 12980 struct kvm_kernel_irqfd *irqfd = 12981 container_of(cons, struct kvm_kernel_irqfd, consumer); 12982 12983 WARN_ON(irqfd->producer != prod); 12984 irqfd->producer = NULL; 12985 12986 /* 12987 * When producer of consumer is unregistered, we change back to 12988 * remapped mode, so we can re-use the current implementation 12989 * when the irq is masked/disabled or the consumer side (KVM 12990 * int this case doesn't want to receive the interrupts. 12991 */ 12992 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); 12993 if (ret) 12994 printk(KERN_INFO "irq bypass consumer (token %p) unregistration" 12995 " fails: %d\n", irqfd->consumer.token, ret); 12996 12997 kvm_arch_end_assignment(irqfd->kvm); 12998 } 12999 13000 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, 13001 uint32_t guest_irq, bool set) 13002 { 13003 return static_call(kvm_x86_pi_update_irte)(kvm, host_irq, guest_irq, set); 13004 } 13005 13006 bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, 13007 struct kvm_kernel_irq_routing_entry *new) 13008 { 13009 if (new->type != KVM_IRQ_ROUTING_MSI) 13010 return true; 13011 13012 return !!memcmp(&old->msi, &new->msi, sizeof(new->msi)); 13013 } 13014 13015 bool kvm_vector_hashing_enabled(void) 13016 { 13017 return vector_hashing; 13018 } 13019 13020 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) 13021 { 13022 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; 13023 } 13024 EXPORT_SYMBOL_GPL(kvm_arch_no_poll); 13025 13026 13027 int kvm_spec_ctrl_test_value(u64 value) 13028 { 13029 /* 13030 * test that setting IA32_SPEC_CTRL to given value 13031 * is allowed by the host processor 13032 */ 13033 13034 u64 saved_value; 13035 unsigned long flags; 13036 int ret = 0; 13037 13038 local_irq_save(flags); 13039 13040 if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value)) 13041 ret = 1; 13042 else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value)) 13043 ret = 1; 13044 else 13045 wrmsrl(MSR_IA32_SPEC_CTRL, saved_value); 13046 13047 local_irq_restore(flags); 13048 13049 return ret; 13050 } 13051 EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value); 13052 13053 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) 13054 { 13055 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 13056 struct x86_exception fault; 13057 u64 access = error_code & 13058 (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK); 13059 13060 if (!(error_code & PFERR_PRESENT_MASK) || 13061 mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) { 13062 /* 13063 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page 13064 * tables probably do not match the TLB. Just proceed 13065 * with the error code that the processor gave. 13066 */ 13067 fault.vector = PF_VECTOR; 13068 fault.error_code_valid = true; 13069 fault.error_code = error_code; 13070 fault.nested_page_fault = false; 13071 fault.address = gva; 13072 fault.async_page_fault = false; 13073 } 13074 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); 13075 } 13076 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error); 13077 13078 /* 13079 * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns 13080 * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value 13081 * indicates whether exit to userspace is needed. 13082 */ 13083 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, 13084 struct x86_exception *e) 13085 { 13086 if (r == X86EMUL_PROPAGATE_FAULT) { 13087 kvm_inject_emulated_page_fault(vcpu, e); 13088 return 1; 13089 } 13090 13091 /* 13092 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED 13093 * while handling a VMX instruction KVM could've handled the request 13094 * correctly by exiting to userspace and performing I/O but there 13095 * doesn't seem to be a real use-case behind such requests, just return 13096 * KVM_EXIT_INTERNAL_ERROR for now. 13097 */ 13098 kvm_prepare_emulation_failure_exit(vcpu); 13099 13100 return 0; 13101 } 13102 EXPORT_SYMBOL_GPL(kvm_handle_memory_failure); 13103 13104 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) 13105 { 13106 bool pcid_enabled; 13107 struct x86_exception e; 13108 struct { 13109 u64 pcid; 13110 u64 gla; 13111 } operand; 13112 int r; 13113 13114 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 13115 if (r != X86EMUL_CONTINUE) 13116 return kvm_handle_memory_failure(vcpu, r, &e); 13117 13118 if (operand.pcid >> 12 != 0) { 13119 kvm_inject_gp(vcpu, 0); 13120 return 1; 13121 } 13122 13123 pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); 13124 13125 switch (type) { 13126 case INVPCID_TYPE_INDIV_ADDR: 13127 if ((!pcid_enabled && (operand.pcid != 0)) || 13128 is_noncanonical_address(operand.gla, vcpu)) { 13129 kvm_inject_gp(vcpu, 0); 13130 return 1; 13131 } 13132 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); 13133 return kvm_skip_emulated_instruction(vcpu); 13134 13135 case INVPCID_TYPE_SINGLE_CTXT: 13136 if (!pcid_enabled && (operand.pcid != 0)) { 13137 kvm_inject_gp(vcpu, 0); 13138 return 1; 13139 } 13140 13141 kvm_invalidate_pcid(vcpu, operand.pcid); 13142 return kvm_skip_emulated_instruction(vcpu); 13143 13144 case INVPCID_TYPE_ALL_NON_GLOBAL: 13145 /* 13146 * Currently, KVM doesn't mark global entries in the shadow 13147 * page tables, so a non-global flush just degenerates to a 13148 * global flush. If needed, we could optimize this later by 13149 * keeping track of global entries in shadow page tables. 13150 */ 13151 13152 fallthrough; 13153 case INVPCID_TYPE_ALL_INCL_GLOBAL: 13154 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 13155 return kvm_skip_emulated_instruction(vcpu); 13156 13157 default: 13158 kvm_inject_gp(vcpu, 0); 13159 return 1; 13160 } 13161 } 13162 EXPORT_SYMBOL_GPL(kvm_handle_invpcid); 13163 13164 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu) 13165 { 13166 struct kvm_run *run = vcpu->run; 13167 struct kvm_mmio_fragment *frag; 13168 unsigned int len; 13169 13170 BUG_ON(!vcpu->mmio_needed); 13171 13172 /* Complete previous fragment */ 13173 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 13174 len = min(8u, frag->len); 13175 if (!vcpu->mmio_is_write) 13176 memcpy(frag->data, run->mmio.data, len); 13177 13178 if (frag->len <= 8) { 13179 /* Switch to the next fragment. */ 13180 frag++; 13181 vcpu->mmio_cur_fragment++; 13182 } else { 13183 /* Go forward to the next mmio piece. */ 13184 frag->data += len; 13185 frag->gpa += len; 13186 frag->len -= len; 13187 } 13188 13189 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 13190 vcpu->mmio_needed = 0; 13191 13192 // VMG change, at this point, we're always done 13193 // RIP has already been advanced 13194 return 1; 13195 } 13196 13197 // More MMIO is needed 13198 run->mmio.phys_addr = frag->gpa; 13199 run->mmio.len = min(8u, frag->len); 13200 run->mmio.is_write = vcpu->mmio_is_write; 13201 if (run->mmio.is_write) 13202 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 13203 run->exit_reason = KVM_EXIT_MMIO; 13204 13205 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13206 13207 return 0; 13208 } 13209 13210 int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, 13211 void *data) 13212 { 13213 int handled; 13214 struct kvm_mmio_fragment *frag; 13215 13216 if (!data) 13217 return -EINVAL; 13218 13219 handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data); 13220 if (handled == bytes) 13221 return 1; 13222 13223 bytes -= handled; 13224 gpa += handled; 13225 data += handled; 13226 13227 /*TODO: Check if need to increment number of frags */ 13228 frag = vcpu->mmio_fragments; 13229 vcpu->mmio_nr_fragments = 1; 13230 frag->len = bytes; 13231 frag->gpa = gpa; 13232 frag->data = data; 13233 13234 vcpu->mmio_needed = 1; 13235 vcpu->mmio_cur_fragment = 0; 13236 13237 vcpu->run->mmio.phys_addr = gpa; 13238 vcpu->run->mmio.len = min(8u, frag->len); 13239 vcpu->run->mmio.is_write = 1; 13240 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 13241 vcpu->run->exit_reason = KVM_EXIT_MMIO; 13242 13243 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13244 13245 return 0; 13246 } 13247 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write); 13248 13249 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, 13250 void *data) 13251 { 13252 int handled; 13253 struct kvm_mmio_fragment *frag; 13254 13255 if (!data) 13256 return -EINVAL; 13257 13258 handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data); 13259 if (handled == bytes) 13260 return 1; 13261 13262 bytes -= handled; 13263 gpa += handled; 13264 data += handled; 13265 13266 /*TODO: Check if need to increment number of frags */ 13267 frag = vcpu->mmio_fragments; 13268 vcpu->mmio_nr_fragments = 1; 13269 frag->len = bytes; 13270 frag->gpa = gpa; 13271 frag->data = data; 13272 13273 vcpu->mmio_needed = 1; 13274 vcpu->mmio_cur_fragment = 0; 13275 13276 vcpu->run->mmio.phys_addr = gpa; 13277 vcpu->run->mmio.len = min(8u, frag->len); 13278 vcpu->run->mmio.is_write = 0; 13279 vcpu->run->exit_reason = KVM_EXIT_MMIO; 13280 13281 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13282 13283 return 0; 13284 } 13285 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read); 13286 13287 static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size) 13288 { 13289 vcpu->arch.sev_pio_count -= count; 13290 vcpu->arch.sev_pio_data += count * size; 13291 } 13292 13293 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, 13294 unsigned int port); 13295 13296 static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu) 13297 { 13298 int size = vcpu->arch.pio.size; 13299 int port = vcpu->arch.pio.port; 13300 13301 vcpu->arch.pio.count = 0; 13302 if (vcpu->arch.sev_pio_count) 13303 return kvm_sev_es_outs(vcpu, size, port); 13304 return 1; 13305 } 13306 13307 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, 13308 unsigned int port) 13309 { 13310 for (;;) { 13311 unsigned int count = 13312 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); 13313 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count); 13314 13315 /* memcpy done already by emulator_pio_out. */ 13316 advance_sev_es_emulated_pio(vcpu, count, size); 13317 if (!ret) 13318 break; 13319 13320 /* Emulation done by the kernel. */ 13321 if (!vcpu->arch.sev_pio_count) 13322 return 1; 13323 } 13324 13325 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs; 13326 return 0; 13327 } 13328 13329 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, 13330 unsigned int port); 13331 13332 static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu) 13333 { 13334 unsigned count = vcpu->arch.pio.count; 13335 int size = vcpu->arch.pio.size; 13336 int port = vcpu->arch.pio.port; 13337 13338 complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data); 13339 advance_sev_es_emulated_pio(vcpu, count, size); 13340 if (vcpu->arch.sev_pio_count) 13341 return kvm_sev_es_ins(vcpu, size, port); 13342 return 1; 13343 } 13344 13345 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, 13346 unsigned int port) 13347 { 13348 for (;;) { 13349 unsigned int count = 13350 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); 13351 if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count)) 13352 break; 13353 13354 /* Emulation done by the kernel. */ 13355 advance_sev_es_emulated_pio(vcpu, count, size); 13356 if (!vcpu->arch.sev_pio_count) 13357 return 1; 13358 } 13359 13360 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins; 13361 return 0; 13362 } 13363 13364 int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size, 13365 unsigned int port, void *data, unsigned int count, 13366 int in) 13367 { 13368 vcpu->arch.sev_pio_data = data; 13369 vcpu->arch.sev_pio_count = count; 13370 return in ? kvm_sev_es_ins(vcpu, size, port) 13371 : kvm_sev_es_outs(vcpu, size, port); 13372 } 13373 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io); 13374 13375 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry); 13376 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); 13377 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio); 13378 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); 13379 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); 13380 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); 13381 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); 13382 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter); 13383 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); 13384 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); 13385 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); 13386 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed); 13387 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); 13388 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); 13389 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts); 13390 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset); 13391 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update); 13392 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full); 13393 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update); 13394 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access); 13395 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi); 13396 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log); 13397 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath); 13398 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell); 13399 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq); 13400 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter); 13401 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit); 13402 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter); 13403 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit); 13404 13405 static int __init kvm_x86_init(void) 13406 { 13407 kvm_mmu_x86_module_init(); 13408 return 0; 13409 } 13410 module_init(kvm_x86_init); 13411 13412 static void __exit kvm_x86_exit(void) 13413 { 13414 /* 13415 * If module_init() is implemented, module_exit() must also be 13416 * implemented to allow module unload. 13417 */ 13418 } 13419 module_exit(kvm_x86_exit); 13420