// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"
#include "lapic.h"
#include "xen.h"
#include "smm.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
#include <linux/suspend.h>

#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <asm/pkru.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <asm/sgx.h>
#include <clocksource/hyperv_timer.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32

struct kvm_caps kvm_caps __read_mostly = {
	.supported_mce_cap = MCG_CTL_P | MCG_SER_P,
};
EXPORT_SYMBOL_GPL(kvm_caps);

#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))

#define emul_to_vcpu(ctxt) \
	((struct kvm_vcpu *)(ctxt)->vcpu)

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;

#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)

#define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE

#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS |	\
				    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);

struct kvm_x86_ops kvm_x86_ops __read_mostly;

#define KVM_X86_OP(func)					     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_##func,			     \
				*(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>
EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
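/*
 * Illustrative sketch (not part of the build): for an entry such as
 * KVM_X86_OP(get_cpl) in asm/kvm-x86-ops.h, the X-macro above expands to
 * roughly the following, creating one patchable static-call site per
 * vendor callback, which the VMX/SVM modules later wire up:
 */
#if 0 /* expansion example only */
DEFINE_STATIC_CALL_NULL(kvm_x86_get_cpl,
			*(((struct kvm_x86_ops *)0)->get_cpl));
#endif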
static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL_GPL(report_ignored_msrs);

unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

/*
 * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
 * adaptive tuning starting from default advancement of 1000ns. '0' disables
 * advancement entirely. Any other value is used as-is and disables adaptive
 * tuning, i.e. allows privileged userspace to set an exact advancement time.
 */
static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);

bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, S_IRUGO);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);

/*
 * Flags to manipulate forced emulation behavior (any non-zero value will
 * enable forced emulation).
 */
#define KVM_FEP_CLEAR_RFLAGS_RF	BIT(1)
static int __read_mostly force_emulation_prefix;
module_param(force_emulation_prefix, int, 0644);

int __read_mostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);

/* Enable/disable PMU virtualization */
bool __read_mostly enable_pmu = true;
EXPORT_SYMBOL_GPL(enable_pmu);
module_param(enable_pmu, bool, 0444);

bool __read_mostly eager_page_split = true;
module_param(eager_page_split, bool, 0644);

/*
 * Restoring the host value for MSRs that are only consumed when running in
 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
 * returns to userspace, i.e. the kernel can run with the guest's value.
 */
#define KVM_MAX_NR_USER_RETURN_MSRS 16

struct kvm_user_return_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_user_return_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_MAX_NR_USER_RETURN_MSRS];
};

u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;

#define KVM_SUPPORTED_XCR0	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)

u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);

bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

bool __read_mostly enable_apicv = true;
EXPORT_SYMBOL_GPL(enable_apicv);

u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
	STATS_DESC_COUNTER(VM, mmu_pte_write),
	STATS_DESC_COUNTER(VM, mmu_pde_zapped),
	STATS_DESC_COUNTER(VM, mmu_flooded),
	STATS_DESC_COUNTER(VM, mmu_recycled),
	STATS_DESC_COUNTER(VM, mmu_cache_miss),
	STATS_DESC_ICOUNTER(VM, mmu_unsync),
	STATS_DESC_ICOUNTER(VM, pages_4k),
	STATS_DESC_ICOUNTER(VM, pages_2m),
	STATS_DESC_ICOUNTER(VM, pages_1g),
	STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
	STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
	STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, pf_taken),
	STATS_DESC_COUNTER(VCPU, pf_fixed),
	STATS_DESC_COUNTER(VCPU, pf_emulate),
	STATS_DESC_COUNTER(VCPU, pf_spurious),
	STATS_DESC_COUNTER(VCPU, pf_fast),
	STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
	STATS_DESC_COUNTER(VCPU, pf_guest),
	STATS_DESC_COUNTER(VCPU, tlb_flush),
	STATS_DESC_COUNTER(VCPU, invlpg),
	STATS_DESC_COUNTER(VCPU, exits),
	STATS_DESC_COUNTER(VCPU, io_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, irq_window_exits),
	STATS_DESC_COUNTER(VCPU, nmi_window_exits),
	STATS_DESC_COUNTER(VCPU, l1d_flush),
	STATS_DESC_COUNTER(VCPU, halt_exits),
	STATS_DESC_COUNTER(VCPU, request_irq_exits),
	STATS_DESC_COUNTER(VCPU, irq_exits),
	STATS_DESC_COUNTER(VCPU, host_state_reload),
	STATS_DESC_COUNTER(VCPU, fpu_reload),
	STATS_DESC_COUNTER(VCPU, insn_emulation),
	STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
	STATS_DESC_COUNTER(VCPU, hypercalls),
	STATS_DESC_COUNTER(VCPU, irq_injections),
	STATS_DESC_COUNTER(VCPU, nmi_injections),
	STATS_DESC_COUNTER(VCPU, req_event),
	STATS_DESC_COUNTER(VCPU, nested_run),
	STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
	STATS_DESC_COUNTER(VCPU, directed_yield_successful),
	STATS_DESC_COUNTER(VCPU, preemption_reported),
	STATS_DESC_COUNTER(VCPU, preemption_other),
	STATS_DESC_IBOOLEAN(VCPU, guest_mode),
	STATS_DESC_COUNTER(VCPU, notify_window_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

u64 __read_mostly host_xcr0;

static struct kmem_cache *x86_emulator_cache;

/*
 * When called, it means the previous get/set msr reached an invalid msr.
 * Return true if we want to ignore/silence this failed msr access.
 */
static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
{
	const char *op = write ? "wrmsr" : "rdmsr";

	if (ignore_msrs) {
		if (report_ignored_msrs)
			kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		/* Mask the error */
		return true;
	} else {
		kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		return false;
	}
}

static struct kmem_cache *kvm_alloc_emulator_cache(void)
{
	unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
	unsigned int size = sizeof(struct x86_emulate_ctxt);

	return kmem_cache_create_usercopy("x86_emulator", size,
					  __alignof__(struct x86_emulate_ctxt),
					  SLAB_ACCOUNT, useroffset,
					  size - useroffset, NULL);
}

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_user_return_msrs *msrs
		= container_of(urn, struct kvm_user_return_msrs, urn);
	struct kvm_user_return_msr_values *values;
	unsigned long flags;

	/*
	 * Disabling irqs at this point since the following code could be
	 * interrupted and executed through kvm_arch_hardware_disable()
	 */
	local_irq_save(flags);
	if (msrs->registered) {
		msrs->registered = false;
		user_return_notifier_unregister(urn);
	}
	local_irq_restore(flags);
	for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
		values = &msrs->values[slot];
		if (values->host != values->curr) {
			wrmsrl(kvm_uret_msrs_list[slot], values->host);
			values->curr = values->host;
		}
	}
}

static int kvm_probe_user_return_msr(u32 msr)
{
	u64 val;
	int ret;

	preempt_disable();
	ret = rdmsrl_safe(msr, &val);
	if (ret)
		goto out;
	ret = wrmsrl_safe(msr, val);
out:
	preempt_enable();
	return ret;
}

int kvm_add_user_return_msr(u32 msr)
{
	BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);

	if (kvm_probe_user_return_msr(msr))
		return -1;

	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
	return kvm_nr_uret_msrs++;
}
EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);

int kvm_find_user_return_msr(u32 msr)
{
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		if (kvm_uret_msrs_list[i] == msr)
			return i;
	}
	return -1;
}
EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);

static void kvm_user_return_msr_cpu_online(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	u64 value;
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		rdmsrl_safe(kvm_uret_msrs_list[i], &value);
		msrs->values[i].host = value;
		msrs->values[i].curr = value;
	}
}

int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	int err;

	value = (value & mask) | (msrs->values[slot].host & ~mask);
	if (value == msrs->values[slot].curr)
		return 0;
	err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
	if (err)
		return 1;

	msrs->values[slot].curr = value;
	if (!msrs->registered) {
		msrs->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&msrs->urn);
		msrs->registered = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);

static void drop_user_return_notifiers(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);

	if (msrs->registered)
		kvm_on_user_return(&msrs->urn);
}
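/*
 * Minimal usage sketch of the user-return MSR API above (illustrative
 * only, not compiled; the function and variable names are hypothetical).
 * A vendor module registers an MSR once at init time and loads the guest
 * value before entering the guest; kvm_on_user_return() then lazily
 * restores the host value when the CPU returns to userspace.
 */
#if 0
static int example_tsc_aux_slot;

static int example_hardware_setup(void)
{
	example_tsc_aux_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
	return example_tsc_aux_slot < 0 ? -EIO : 0;
}

static void example_prepare_switch_to_guest(u64 guest_tsc_aux)
{
	/* Writes the MSR only if the value changes, and arms the notifier. */
	kvm_set_user_return_msr(example_tsc_aux_slot, guest_tsc_aux, -1ull);
}
#endif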
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
	return kvm_apic_mode(kvm_get_apic_base(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_apic_mode);

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
	enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
	u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);

	if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
		return 1;
	if (!msr_info->host_initiated) {
		if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
			return 1;
		if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
			return 1;
	}

	kvm_lapic_set_base(vcpu, msr_info->data);
	kvm_recalculate_apic_map(vcpu->kvm);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

/*
 * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
 *
 * Hardware virtualization extension instructions may fault if a reboot turns
 * off virtualization while processes are running. Usually after catching the
 * fault we just panic; during reboot instead the instruction is ignored.
 */
noinstr void kvm_spurious_fault(void)
{
	/* Fault while not rebooting. We want the trace. */
	BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

#define EXCPT_FAULT		0
#define EXCPT_TRAP		1
#define EXCPT_ABORT		2
#define EXCPT_INTERRUPT		3
#define EXCPT_DB		4

static int exception_type(int vector)
{
	unsigned int mask;

	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
		return EXCPT_INTERRUPT;

	mask = 1 << vector;

	/*
	 * #DBs can be trap-like or fault-like, the caller must check other CPU
	 * state, e.g. DR6, to determine whether a #DB is a trap or fault.
	 */
	if (mask & (1 << DB_VECTOR))
		return EXCPT_DB;

	if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
		return EXCPT_TRAP;

	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
		return EXCPT_ABORT;

	/* Reserved exceptions will result in fault */
	return EXCPT_FAULT;
}

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex)
{
	if (!ex->has_payload)
		return;

	switch (ex->vector) {
	case DB_VECTOR:
		/*
		 * "Certain debug exceptions may clear bits 0-3. The
		 * remaining contents of the DR6 register are never
		 * cleared by the processor".
		 */
		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
		/*
		 * In order to reflect the #DB exception payload in guest
		 * dr6, three components need to be considered: active low
		 * bits, FIXED_1 bits and active high bits (e.g. DR6_BD,
		 * DR6_BS and DR6_BT).
		 * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits.
		 * In the target guest dr6:
		 * FIXED_1 bits should always be set.
		 * Active low bits should be cleared if 1-setting in payload.
		 * Active high bits should be set if 1-setting in payload.
		 *
		 * Note, the payload is compatible with the pending debug
		 * exceptions/exit qualification under VMX, where active_low
		 * bits are active high in the payload.
		 * So they need to be flipped for DR6.
		 */
		vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
		vcpu->arch.dr6 |= ex->payload;
		vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;

		/*
		 * The #DB payload is defined as compatible with the 'pending
		 * debug exceptions' field under VMX, not DR6. While bit 12 is
		 * defined in the 'pending debug exceptions' field (enabled
		 * breakpoint), it is reserved and must be zero in DR6.
		 */
		vcpu->arch.dr6 &= ~BIT(12);
		break;
	case PF_VECTOR:
		vcpu->arch.cr2 = ex->payload;
		break;
	}

	ex->has_payload = false;
	ex->payload = 0;
}
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
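/*
 * Worked example of the #DB payload transform above (illustrative only,
 * not compiled; the helper name is hypothetical). The math mirrors the
 * statements operating on vcpu->arch.dr6:
 */
#if 0
static unsigned long example_db_payload_to_dr6(unsigned long dr6,
					       unsigned long payload)
{
	dr6 &= ~DR_TRAP_BITS;
	dr6 |= DR6_ACTIVE_LOW;			/* FIXED_1/active-low bits set */
	dr6 |= payload;				/* active-high payload bits set */
	dr6 ^= payload & DR6_ACTIVE_LOW;	/* active-low payload bits flipped */
	dr6 &= ~BIT(12);			/* reserved-in-DR6 bit cleared */
	return dr6;
}

/*
 * E.g. a single-step payload of DR6_BS (active high) yields
 * (DR6_ACTIVE_LOW | DR6_BS) & ~BIT(12), while a payload with DR6_RTM set
 * (active low) yields a dr6 with that bit cleared, as hardware reports.
 */
#endif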
static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
				       bool has_error_code, u32 error_code,
				       bool has_payload, unsigned long payload)
{
	struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;

	ex->vector = vector;
	ex->injected = false;
	ex->pending = true;
	ex->has_error_code = has_error_code;
	ex->error_code = error_code;
	ex->has_payload = has_payload;
	ex->payload = payload;
}

/* Forcibly leave nested mode in cases like a vCPU reset */
static void kvm_leave_nested(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.nested_ops->leave_nested(vcpu);
}

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool has_payload, unsigned long payload, bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	/*
	 * If the exception is destined for L2 and isn't being reinjected,
	 * morph it to a VM-Exit if L1 wants to intercept the exception. A
	 * previously injected exception is not checked because it was checked
	 * when it was originally queued, and re-checking is incorrect if _L1_
	 * injected the exception, in which case it's exempt from interception.
	 */
	if (!reinject && is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
		kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
					   has_payload, payload);
		return;
	}

	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
	queue:
		if (reinject) {
			/*
			 * On VM-Entry, an exception can be pending if and only
			 * if event injection was blocked by nested_run_pending.
			 * In that case, however, vcpu_enter_guest() requests an
			 * immediate exit, and the guest shouldn't proceed far
			 * enough to need reinjection.
			 */
			WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
			vcpu->arch.exception.injected = true;
			if (WARN_ON_ONCE(has_payload)) {
				/*
				 * A reinjected event has already
				 * delivered its payload.
				 */
				has_payload = false;
				payload = 0;
			}
		} else {
			vcpu->arch.exception.pending = true;
			vcpu->arch.exception.injected = false;
		}
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.vector = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.has_payload = has_payload;
		vcpu->arch.exception.payload = payload;
		if (!is_guest_mode(vcpu))
			kvm_deliver_exception_payload(vcpu,
						      &vcpu->arch.exception);
		return;
	}

	/* An exception is already pending or injected, check for #DF. */
	prev_nr = vcpu->arch.exception.vector;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
	    (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/*
		 * Synthesize #DF. Clear the previously injected or pending
		 * exception so as not to incorrectly trigger shutdown.
		 */
		vcpu->arch.exception.injected = false;
		vcpu->arch.exception.pending = false;

		kvm_queue_exception_e(vcpu, DF_VECTOR, 0);
	} else {
		/*
		 * Replace the previous exception with the new one in the hope
		 * that instruction re-execution will regenerate the lost
		 * exception.
		 */
		goto queue;
	}
}
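/*
 * Illustrative outcomes of the class rules above (scenarios are examples
 * of the existing logic, not new behavior):
 *  - #DB (benign) while #GP is pending       -> pending #GP is replaced and
 *                                               expected to regenerate on
 *                                               re-execution
 *  - #GP (contributory) while #GP/#PF pends  -> synthesized #DF
 *  - any exception while #DF is pending      -> triple fault, shutdown
 */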
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
			   unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_p);

static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
				    u32 error_code, unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, true, error_code,
			       true, payload, false);
}

int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		return kvm_skip_emulated_instruction(vcpu);

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
				       EMULTYPE_COMPLETE_USER_EXIT);
}

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;

	/*
	 * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of
	 * whether or not L1 wants to intercept "regular" #PF.
	 */
	if (is_guest_mode(vcpu) && fault->async_page_fault)
		kvm_queue_exception_vmexit(vcpu, PF_VECTOR,
					   true, fault->error_code,
					   true, fault->address);
	else
		kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
					fault->address);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
				    struct x86_exception *fault)
{
	struct kvm_mmu *fault_mmu;

	WARN_ON_ONCE(fault->vector != PF_VECTOR);

	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
					       vcpu->arch.walk_mmu;

	/*
	 * Invalidate the TLB entry for the faulting address, if it exists,
	 * else the access will fault indefinitely (and to emulate hardware).
	 */
	if ((fault->error_code & PFERR_PRESENT_MASK) &&
	    !(fault->error_code & PFERR_RSVD_MASK))
		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
				       fault_mmu->root.hpa);

	fault_mmu->inject_page_fault(vcpu, fault);
}
EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Check whether cpl <= required_cpl; if so, return true, otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);

static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
}

/*
 * Load the PAE PDPTRs. Return 1 if they are all valid, 0 otherwise.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	gpa_t real_gpa;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	/*
	 * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
	 * to an L1 GPA.
	 */
	real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
				     PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
	if (real_gpa == INVALID_GPA)
		return 0;

	/* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */
	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
				       cr3 & GENMASK(11, 5), sizeof(pdpte));
	if (ret < 0)
		return 0;

	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & PT_PRESENT_MASK) &&
		    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
			return 0;
		}
	}

	/*
	 * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
	 * Shadow page roots need to be reconstructed instead.
	 */
	if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
		kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
	vcpu->arch.pdptrs_from_userspace = false;

	return 1;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
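/*
 * Worked example for the PDPTE read above (numbers illustrative): with
 * cr3 = 0x12345020, pdpt_gfn is 0x12345 and cr3 & GENMASK(11, 5) is 0x20,
 * so the four 8-byte PDPTEs are read from guest physical addresses
 * 0x12345020..0x1234503f (after nested translation, if any), honoring the
 * 32-byte alignment of the PDPT within the CR3 page.
 */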
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);

		/*
		 * Clearing CR0.PG is defined to flush the TLB from the guest's
		 * perspective.
		 */
		if (!(cr0 & X86_CR0_PG))
			kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
	}

	if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr0);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

#ifdef CONFIG_X86_64
	if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
	    (cr0 & X86_CR0_PG)) {
		int cs_db, cs_l;

		if (!is_pae(vcpu))
			return 1;
		static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
		if (cs_l)
			return 1;
	}
#endif
	if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
	    is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
	    !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
		return 1;

	if (!(cr0 & X86_CR0_PG) &&
	    (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
		return 1;

	static_call(kvm_x86_set_cr0)(vcpu, cr0);

	kvm_post_set_cr0(vcpu, old_cr0, cr0);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
	}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    vcpu->arch.pkru != vcpu->arch.host_pkru &&
	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
	     kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
		write_pkru(vcpu->arch.pkru);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);

void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
	     kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
		vcpu->arch.pkru = rdpkru();
		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
			write_pkru(vcpu->arch.host_pkru);
	}
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, host_xss);
	}

}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);

#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
}
#endif

static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0 = xcr;
	u64 old_xcr0 = vcpu->arch.xcr0;
	u64 valid_bits;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	if (!(xcr0 & XFEATURE_MASK_FP))
		return 1;
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
		return 1;

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving. However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
	 */
	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
	if (xcr0 & ~valid_bits)
		return 1;

	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
		return 1;

	if (xcr0 & XFEATURE_MASK_AVX512) {
		if (!(xcr0 & XFEATURE_MASK_YMM))
			return 1;
		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
			return 1;
	}

	if ((xcr0 & XFEATURE_MASK_XTILE) &&
	    ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE))
		return 1;

	vcpu->arch.xcr0 = xcr0;

	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
		kvm_update_cpuid_runtime(vcpu);
	return 0;
}

int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
	/* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
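/*
 * Examples of the XCR0 dependency checks above (illustrative calls, not
 * compiled; they assume guest_supported_xcr0 includes the named bits):
 */
#if 0
	/* Accepted: legacy FP/SSE plus AVX. */
	__kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
		      XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM);
	/* Rejected: YMM requires SSE. */
	__kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
		      XFEATURE_MASK_FP | XFEATURE_MASK_YMM);
	/* Rejected: the AVX-512 components must be enabled together. */
	__kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
		      XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM |
		      XFEATURE_MASK_OPMASK);
#endif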
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & cr4_reserved_bits)
		return false;

	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);

static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	return __kvm_is_valid_cr4(vcpu, cr4) &&
	       static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
}

void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{
	if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	/*
	 * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB
	 * according to the SDM; however, stale prev_roots could be reused
	 * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we
	 * free them all. This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST
	 * or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not
	 * flushed, so the flush checks below still apply.
	 */
	if (!tdp_enabled &&
	    (cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE))
		kvm_mmu_unload(vcpu);

	/*
	 * The TLB has to be flushed for all PCIDs if any of the following
	 * (architecturally required) changes happen:
	 * - CR4.PCIDE is changed from 1 to 0
	 * - CR4.PGE is toggled
	 *
	 * This is a superset of KVM_REQ_TLB_FLUSH_CURRENT.
	 */
	if (((cr4 ^ old_cr4) & X86_CR4_PGE) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);

	/*
	 * The TLB has to be flushed for the current PCID if any of the
	 * following (architecturally required) changes happen:
	 * - CR4.SMEP is changed from 0 to 1
	 * - CR4.PAE is toggled
	 */
	else if (((cr4 ^ old_cr4) & X86_CR4_PAE) ||
		 ((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP)))
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);

}
EXPORT_SYMBOL_GPL(kvm_post_set_cr4);

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);

	if (!kvm_is_valid_cr4(vcpu, cr4))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
		if ((cr4 ^ old_cr4) & X86_CR4_LA57)
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
		   && !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
			return 1;

		/* PCID cannot be enabled when cr3[11:0] != 000H or EFER.LMA = 0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	static_call(kvm_x86_set_cr4)(vcpu, cr4);

	kvm_post_set_cr4(vcpu, old_cr4, cr4);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	unsigned long roots_to_free = 0;
	int i;

	/*
	 * MOV CR3 and INVPCID are usually not intercepted when using TDP, but
	 * this is reachable when running EPT=1 and unrestricted_guest=0, and
	 * also via the emulator. KVM's TDP page tables are not in the scope of
	 * the invalidation, but the guest's TLB entries need to be flushed as
	 * the CPU may have cached entries in its TLB for the target PCID.
	 */
	if (unlikely(tdp_enabled)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/*
	 * If neither the current CR3 nor any of the prev_roots use the given
	 * PCID, then nothing needs to be done here because a resync will
	 * happen anyway before switching to any other CR3.
	 */
	if (kvm_get_active_pcid(vcpu) == pcid) {
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/*
	 * If PCID is disabled, there is no need to free prev_roots even if the
	 * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
	 * with PCIDE=0.
	 */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);

	kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
}

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	bool skip_tlb_flush = false;
	unsigned long pcid = 0;
#ifdef CONFIG_X86_64
	bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);

	if (pcid_enabled) {
		skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
		cr3 &= ~X86_CR3_PCID_NOFLUSH;
		pcid = cr3 & X86_CR3_PCID_MASK;
	}
#endif

	/* PDPTRs are always reloaded for PAE paging. */
	if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
		goto handle_tlb_flush;

	/*
	 * Do not condition the GPA check on long mode, this helper is used to
	 * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
	 * the current vCPU mode is accurate.
	 */
	if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
		return 1;

	if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
		return 1;

	if (cr3 != kvm_read_cr3(vcpu))
		kvm_mmu_new_pgd(vcpu, cr3);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
	/* Do not call post_set_cr3, we do not get here for confidential guests. */

handle_tlb_flush:
	/*
	 * A load of CR3 that flushes the TLB flushes only the current PCID,
	 * even if PCID is disabled, in which case PCID=0 is flushed. It's a
	 * moot point in the end because _disabling_ PCID will flush all PCIDs,
	 * and it's impossible to use a non-zero PCID when PCID is disabled,
	 * i.e. only PCID=0 can be relevant.
	 */
	if (!skip_tlb_flush)
		kvm_invalidate_pcid(vcpu, pcid);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
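/*
 * Illustrative CR3 values for the PCID handling above (64-bit guest with
 * CR4.PCIDE=1; 'base' is a hypothetical page-aligned root): writing
 * cr3 = base | 0x001 switches to PCID 1 and flushes that PCID, while
 * cr3 = base | 0x001 | X86_CR3_PCID_NOFLUSH (bit 63) performs the same
 * switch but sets skip_tlb_flush, so kvm_invalidate_pcid() is not called.
 */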
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (lapic_in_kernel(vcpu))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
	int i;

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
	}
}

void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	static_call(kvm_x86_set_dr7)(vcpu, dr7);
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
	if (dr7 & DR7_BP_EN_MASK)
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_update_dr7);

static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
	u64 fixed = DR6_FIXED_1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
		fixed |= DR6_RTM;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
		fixed |= DR6_BUS_LOCK;
	return fixed;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[array_index_nospec(dr, size)] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
	case 6:
		if (!kvm_dr6_valid(val))
			return 1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
		break;
	case 5:
	default: /* 7 */
		if (!kvm_dr7_valid(val))
			return 1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[array_index_nospec(dr, size)];
		break;
	case 4:
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
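/*
 * Example of the DR4/DR5 aliasing handled by the case labels above: with
 * CR4.DE=0, "mov %dr4, %rax" reads DR6 and "mov %dr5, %rax" reads DR7;
 * with CR4.DE=1 the earlier kvm_require_dr() check injects #UD instead,
 * so these paths are never reached for DR4/DR5.
 */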
int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_rcx_read(vcpu);
	u64 data;

	if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	kvm_rax_write(vcpu, (u32)data);
	kvm_rdx_write(vcpu, data >> 32);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);

/*
 * List of MSR numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features)
 * extract the supported MSRs from the related const lists.
 * msrs_to_save is selected from msrs_to_save_all to reflect the
 * capabilities of the host cpu. This capabilities test skips MSRs that are
 * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
 * may depend on host virtualization features rather than host cpu features.
 */

static const u32 msrs_to_save_all[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
	MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
	MSR_IA32_SPEC_CTRL,
	MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
	MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
	MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
	MSR_IA32_UMWAIT_CONTROL,

	MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
	MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
	MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,

	/* This part of MSRs should match KVM_INTEL_PMC_MAX_GENERIC. */
	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
	MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
	MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
	MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
	MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
	MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
	MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
	MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,

	MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
	MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,

	/* This part of MSRs should match KVM_AMD_PMC_MAX_GENERIC. */
	MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
	MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
	MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
	MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,

	MSR_IA32_XFD, MSR_IA32_XFD_ERR,
};

static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
static unsigned num_msrs_to_save;

static const u32 emulated_msrs_all[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
	HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
	HV_X64_MSR_RESET,
	HV_X64_MSR_VP_INDEX,
	HV_X64_MSR_VP_RUNTIME,
	HV_X64_MSR_SCONTROL,
	HV_X64_MSR_STIMER0_CONFIG,
	HV_X64_MSR_VP_ASSIST_PAGE,
	HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
	HV_X64_MSR_TSC_EMULATION_STATUS,
	HV_X64_MSR_SYNDBG_OPTIONS,
	HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
	HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
	HV_X64_MSR_SYNDBG_PENDING_BUFFER,

	MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,

	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSC_DEADLINE,
	MSR_IA32_ARCH_CAPABILITIES,
	MSR_IA32_PERF_CAPABILITIES,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
	MSR_IA32_MCG_EXT_CTL,
	MSR_IA32_SMBASE,
	MSR_SMI_COUNT,
	MSR_PLATFORM_INFO,
	MSR_MISC_FEATURES_ENABLES,
	MSR_AMD64_VIRT_SPEC_CTRL,
	MSR_AMD64_TSC_RATIO,
	MSR_IA32_POWER_CTL,
	MSR_IA32_UCODE_REV,

	/*
	 * The following list leaves out MSRs whose values are determined
	 * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
	 * We always support the "true" VMX control MSRs, even if the host
	 * processor does not, so I am putting these registers here rather
	 * than in msrs_to_save_all.
	 */
	MSR_IA32_VMX_BASIC,
	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
	MSR_IA32_VMX_TRUE_EXIT_CTLS,
	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
	MSR_IA32_VMX_MISC,
	MSR_IA32_VMX_CR0_FIXED0,
	MSR_IA32_VMX_CR4_FIXED0,
	MSR_IA32_VMX_VMCS_ENUM,
	MSR_IA32_VMX_PROCBASED_CTLS2,
	MSR_IA32_VMX_EPT_VPID_CAP,
	MSR_IA32_VMX_VMFUNC,

	MSR_K7_HWCR,
	MSR_KVM_POLL_CONTROL,
};

static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
static unsigned num_emulated_msrs;

/*
 * List of MSR numbers which are used to expose MSR-based features that
 * can be used by a hypervisor to validate requested CPU features.
 */
static const u32 msr_based_features_all[] = {
	MSR_IA32_VMX_BASIC,
	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
	MSR_IA32_VMX_PINBASED_CTLS,
	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
	MSR_IA32_VMX_PROCBASED_CTLS,
	MSR_IA32_VMX_TRUE_EXIT_CTLS,
	MSR_IA32_VMX_EXIT_CTLS,
	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
	MSR_IA32_VMX_ENTRY_CTLS,
	MSR_IA32_VMX_MISC,
	MSR_IA32_VMX_CR0_FIXED0,
	MSR_IA32_VMX_CR0_FIXED1,
	MSR_IA32_VMX_CR4_FIXED0,
	MSR_IA32_VMX_CR4_FIXED1,
	MSR_IA32_VMX_VMCS_ENUM,
	MSR_IA32_VMX_PROCBASED_CTLS2,
	MSR_IA32_VMX_EPT_VPID_CAP,
	MSR_IA32_VMX_VMFUNC,

	MSR_F10H_DECFG,
	MSR_IA32_UCODE_REV,
	MSR_IA32_ARCH_CAPABILITIES,
	MSR_IA32_PERF_CAPABILITIES,
};

static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
static unsigned int num_msr_based_features;

/*
 * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
 * does not yet virtualize. These include:
 *   10 - MISC_PACKAGE_CTRLS
 *   11 - ENERGY_FILTERING_CTL
 *   12 - DOITM
 *   18 - FB_CLEAR_CTRL
 *   21 - XAPIC_DISABLE_STATUS
 *   23 - OVERCLOCKING_STATUS
 */

#define KVM_SUPPORTED_ARCH_CAP \
	(ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
	 ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
	 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
	 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO)

static u64 kvm_get_arch_capabilities(void)
{
	u64 data = 0;

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
		data &= KVM_SUPPORTED_ARCH_CAP;
	}

	/*
	 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
	 * the nested hypervisor runs with NX huge pages. If it is not,
	 * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
	 * L1 guests, so it need not worry about its own (L2) guests.
	 */
	data |= ARCH_CAP_PSCHANGE_MC_NO;

	/*
	 * If we're doing cache flushes (either "always" or "cond")
	 * we will do one whenever the guest does a vmlaunch/vmresume.
	 * If an outer hypervisor is doing the cache flush for us
	 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
	 * capability to the guest too, and if EPT is disabled we're not
	 * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will
	 * require a nested hypervisor to do a flush of its own.
	 */
	if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;

	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		data |= ARCH_CAP_RDCL_NO;
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		data |= ARCH_CAP_SSB_NO;
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		data |= ARCH_CAP_MDS_NO;

	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		/*
		 * If RTM=0 because the kernel has disabled TSX, the host might
		 * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0
		 * and therefore knows that there cannot be TAA) but keep
		 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
		 * and we want to allow migrating those guests to tsx=off hosts.
		 */
		data &= ~ARCH_CAP_TAA_NO;
	} else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		data |= ARCH_CAP_TAA_NO;
	} else {
		/*
		 * Nothing to do here; we emulate TSX_CTRL if present on the
		 * host so the guest can choose between disabling TSX or
		 * using VERW to clear CPU buffers.
		 */
	}

	return data;
}

static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
	switch (msr->index) {
	case MSR_IA32_ARCH_CAPABILITIES:
		msr->data = kvm_get_arch_capabilities();
		break;
	case MSR_IA32_PERF_CAPABILITIES:
		msr->data = kvm_caps.supported_perf_cap;
		break;
	case MSR_IA32_UCODE_REV:
		rdmsrl_safe(msr->index, &msr->data);
		break;
	default:
		return static_call(kvm_x86_get_msr_feature)(msr);
	}
	return 0;
}

static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct kvm_msr_entry msr;
	int r;

	msr.index = index;
	r = kvm_get_msr_feature(&msr);

	if (r == KVM_MSR_RET_INVALID) {
		/* Unconditionally clear the output for simplicity */
		*data = 0;
		if (kvm_msr_ignored_check(index, 0, false))
			r = 0;
	}

	if (r)
		return r;

	*data = msr.data;

	return 0;
}

static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
		return false;

	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
		return false;

	if (efer & (EFER_LME | EFER_LMA) &&
	    !guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return false;

	if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
		return false;

	return true;
}

bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	return __kvm_valid_efer(vcpu, efer);
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	u64 old_efer = vcpu->arch.efer;
	u64 efer = msr_info->data;
	int r;

	if (efer & efer_reserved_bits)
		return 1;

	if (!msr_info->host_initiated) {
		if (!__kvm_valid_efer(vcpu, efer))
			return 1;

		if (is_paging(vcpu) &&
		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	r = static_call(kvm_x86_set_efer)(vcpu, efer);
	if (r) {
		WARN_ON(r > 0);
		return r;
	}

	if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
{
	struct kvm_x86_msr_filter *msr_filter;
	struct msr_bitmap_range *ranges;
	struct kvm *kvm = vcpu->kvm;
	bool allowed;
	int idx;
	u32 i;

	/* x2APIC MSRs do not support filtering. */
	if (index >= 0x800 && index <= 0x8ff)
		return true;

	idx = srcu_read_lock(&kvm->srcu);

	msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
	if (!msr_filter) {
		allowed = true;
		goto out;
	}

	allowed = msr_filter->default_allow;
	ranges = msr_filter->ranges;

	for (i = 0; i < msr_filter->count; i++) {
		u32 start = ranges[i].base;
		u32 end = start + ranges[i].nmsrs;
		u32 flags = ranges[i].flags;
		unsigned long *bitmap = ranges[i].bitmap;

		if ((index >= start) && (index < end) && (flags & type)) {
			allowed = !!test_bit(index - start, bitmap);
			break;
		}
	}

out:
	srcu_read_unlock(&kvm->srcu, idx);

	return allowed;
}
EXPORT_SYMBOL_GPL(kvm_msr_allowed);
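/*
 * Userspace counterpart to the filter lookup above (sketch using the uapi
 * in <linux/kvm.h>; the MSR index and fd are hypothetical). A set bit in
 * a range's bitmap allows the corresponding MSR; here writes to one MSR
 * are denied while everything else stays allowed:
 */
#if 0
	__u8 bitmap = 0;	/* bit 0 clear: the MSR at 'base' is denied */
	struct kvm_msr_filter filter = {
		.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
		.ranges[0] = {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0xc0011020,	/* hypothetical index */
			.nmsrs = 1,
			.bitmap = &bitmap,
		},
	};
	ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
#endif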
/*
 * Write @data into the MSR specified by @index. Selected MSR-specific fault
 * checks are bypassed if @host_initiated is %true.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
			 bool host_initiated)
{
	struct msr_data msr;

	switch (index) {
	case MSR_FS_BASE:
	case MSR_GS_BASE:
	case MSR_KERNEL_GS_BASE:
	case MSR_CSTAR:
	case MSR_LSTAR:
		if (is_noncanonical_address(data, vcpu))
			return 1;
		break;
	case MSR_IA32_SYSENTER_EIP:
	case MSR_IA32_SYSENTER_ESP:
		/*
		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if a
		 * non-canonical address is written on Intel but not on
		 * AMD (which ignores the top 32-bits, because it does
		 * not implement 64-bit SYSENTER).
		 *
		 * 64-bit code should hence be able to write a non-canonical
		 * value on AMD. Making the address canonical ensures that
		 * vmentry does not fail on Intel after writing a non-canonical
		 * value, and that something deterministic happens if the guest
		 * invokes 64-bit SYSENTER.
		 */
		data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
		break;
	case MSR_TSC_AUX:
		if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
			return 1;

		if (!host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
			return 1;

		/*
		 * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
		 * incomplete and conflicting architectural behavior. Current
		 * AMD CPUs completely ignore bits 63:32, i.e. they aren't
		 * reserved and always read as zeros. Enforce Intel's reserved
		 * bits check if and only if the guest CPU is Intel, and clear
		 * the bits in all other cases. This ensures cross-vendor
		 * migration will provide consistent behavior for the guest.
		 */
		if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0)
			return 1;

		data = (u32)data;
		break;
	}

	msr.data = data;
	msr.index = index;
	msr.host_initiated = host_initiated;

	return static_call(kvm_x86_set_msr)(vcpu, &msr);
}
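/*
 * Worked example for the SYSENTER fixup above (values illustrative): with
 * 48-bit virtual addresses, __canonical_address(0x0000800000000000, 48)
 * sign-extends bit 47 and yields 0xffff800000000000, so the value KVM
 * stores is always canonical even if the guest wrote a non-canonical one.
 */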
Select MSR specific fault 1882 * checks are bypassed if @host_initiated is %true. 1883 * Returns 0 on success, non-0 otherwise. 1884 * Assumes vcpu_load() was already called. 1885 */ 1886 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, 1887 bool host_initiated) 1888 { 1889 struct msr_data msr; 1890 int ret; 1891 1892 switch (index) { 1893 case MSR_TSC_AUX: 1894 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) 1895 return 1; 1896 1897 if (!host_initiated && 1898 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && 1899 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) 1900 return 1; 1901 break; 1902 } 1903 1904 msr.index = index; 1905 msr.host_initiated = host_initiated; 1906 1907 ret = static_call(kvm_x86_get_msr)(vcpu, &msr); 1908 if (!ret) 1909 *data = msr.data; 1910 return ret; 1911 } 1912 1913 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, 1914 u32 index, u64 *data, bool host_initiated) 1915 { 1916 int ret = __kvm_get_msr(vcpu, index, data, host_initiated); 1917 1918 if (ret == KVM_MSR_RET_INVALID) { 1919 /* Unconditionally clear *data for simplicity */ 1920 *data = 0; 1921 if (kvm_msr_ignored_check(index, 0, false)) 1922 ret = 0; 1923 } 1924 1925 return ret; 1926 } 1927 1928 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1929 { 1930 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) 1931 return KVM_MSR_RET_FILTERED; 1932 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1933 } 1934 1935 static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data) 1936 { 1937 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) 1938 return KVM_MSR_RET_FILTERED; 1939 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1940 } 1941 1942 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1943 { 1944 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1945 } 1946 EXPORT_SYMBOL_GPL(kvm_get_msr); 1947 1948 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) 1949 { 1950 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1951 } 1952 EXPORT_SYMBOL_GPL(kvm_set_msr); 1953 1954 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu) 1955 { 1956 if (!vcpu->run->msr.error) { 1957 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); 1958 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); 1959 } 1960 } 1961 1962 static int complete_emulated_msr_access(struct kvm_vcpu *vcpu) 1963 { 1964 return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error); 1965 } 1966 1967 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) 1968 { 1969 complete_userspace_rdmsr(vcpu); 1970 return complete_emulated_msr_access(vcpu); 1971 } 1972 1973 static int complete_fast_msr_access(struct kvm_vcpu *vcpu) 1974 { 1975 return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); 1976 } 1977 1978 static int complete_fast_rdmsr(struct kvm_vcpu *vcpu) 1979 { 1980 complete_userspace_rdmsr(vcpu); 1981 return complete_fast_msr_access(vcpu); 1982 } 1983 1984 static u64 kvm_msr_reason(int r) 1985 { 1986 switch (r) { 1987 case KVM_MSR_RET_INVALID: 1988 return KVM_MSR_EXIT_REASON_UNKNOWN; 1989 case KVM_MSR_RET_FILTERED: 1990 return KVM_MSR_EXIT_REASON_FILTER; 1991 default: 1992 return KVM_MSR_EXIT_REASON_INVAL; 1993 } 1994 } 1995 1996 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, 1997 u32 exit_reason, u64 data, 1998 int (*completion)(struct kvm_vcpu *vcpu), 1999 int r) 2000 { 2001 u64 msr_reason = kvm_msr_reason(r); 2002 2003 /* Check if the user wanted to know about this MSR fault */ 2004 if 
(!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) 2005 return 0; 2006 2007 vcpu->run->exit_reason = exit_reason; 2008 vcpu->run->msr.error = 0; 2009 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); 2010 vcpu->run->msr.reason = msr_reason; 2011 vcpu->run->msr.index = index; 2012 vcpu->run->msr.data = data; 2013 vcpu->arch.complete_userspace_io = completion; 2014 2015 return 1; 2016 } 2017 2018 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) 2019 { 2020 u32 ecx = kvm_rcx_read(vcpu); 2021 u64 data; 2022 int r; 2023 2024 r = kvm_get_msr_with_filter(vcpu, ecx, &data); 2025 2026 if (!r) { 2027 trace_kvm_msr_read(ecx, data); 2028 2029 kvm_rax_write(vcpu, data & -1u); 2030 kvm_rdx_write(vcpu, (data >> 32) & -1u); 2031 } else { 2032 /* MSR read failed? See if we should ask user space */ 2033 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0, 2034 complete_fast_rdmsr, r)) 2035 return 0; 2036 trace_kvm_msr_read_ex(ecx); 2037 } 2038 2039 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 2040 } 2041 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); 2042 2043 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) 2044 { 2045 u32 ecx = kvm_rcx_read(vcpu); 2046 u64 data = kvm_read_edx_eax(vcpu); 2047 int r; 2048 2049 r = kvm_set_msr_with_filter(vcpu, ecx, data); 2050 2051 if (!r) { 2052 trace_kvm_msr_write(ecx, data); 2053 } else { 2054 /* MSR write failed? See if we should ask user space */ 2055 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data, 2056 complete_fast_msr_access, r)) 2057 return 0; 2058 /* Signal all other negative errors to userspace */ 2059 if (r < 0) 2060 return r; 2061 trace_kvm_msr_write_ex(ecx, data); 2062 } 2063 2064 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 2065 } 2066 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); 2067 2068 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) 2069 { 2070 return kvm_skip_emulated_instruction(vcpu); 2071 } 2072 EXPORT_SYMBOL_GPL(kvm_emulate_as_nop); 2073 2074 int kvm_emulate_invd(struct kvm_vcpu *vcpu) 2075 { 2076 /* Treat an INVD instruction as a NOP and just skip it. */ 2077 return kvm_emulate_as_nop(vcpu); 2078 } 2079 EXPORT_SYMBOL_GPL(kvm_emulate_invd); 2080 2081 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) 2082 { 2083 kvm_queue_exception(vcpu, UD_VECTOR); 2084 return 1; 2085 } 2086 EXPORT_SYMBOL_GPL(kvm_handle_invalid_op); 2087 2088 2089 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn) 2090 { 2091 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) && 2092 !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT)) 2093 return kvm_handle_invalid_op(vcpu); 2094 2095 pr_warn_once("kvm: %s instruction emulated as NOP!\n", insn); 2096 return kvm_emulate_as_nop(vcpu); 2097 } 2098 int kvm_emulate_mwait(struct kvm_vcpu *vcpu) 2099 { 2100 return kvm_emulate_monitor_mwait(vcpu, "MWAIT"); 2101 } 2102 EXPORT_SYMBOL_GPL(kvm_emulate_mwait); 2103 2104 int kvm_emulate_monitor(struct kvm_vcpu *vcpu) 2105 { 2106 return kvm_emulate_monitor_mwait(vcpu, "MONITOR"); 2107 } 2108 EXPORT_SYMBOL_GPL(kvm_emulate_monitor); 2109 2110 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) 2111 { 2112 xfer_to_guest_mode_prepare(); 2113 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || 2114 xfer_to_guest_mode_work_pending(); 2115 } 2116 2117 /* 2118 * The fast path for frequent and performance sensitive wrmsr emulation, 2119 * i.e. 
the sending of IPIs. Sending an IPI early in the VM-Exit flow reduces
2120 * the latency of virtual IPIs by avoiding the expensive bits of transitioning
2121 * from guest to host, e.g. reacquiring KVM's SRCU lock. This is in contrast
2122 * to the other cases, which must be handled after interrupts are enabled on the host.
2123 */
2124 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
2125 {
2126 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
2127 return 1;
2128
2129 if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
2130 ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
2131 ((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
2132 ((u32)(data >> 32) != X2APIC_BROADCAST))
2133 return kvm_x2apic_icr_write(vcpu->arch.apic, data);
2134
2135 return 1;
2136 }
2137
2138 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
2139 {
2140 if (!kvm_can_use_hv_timer(vcpu))
2141 return 1;
2142
2143 kvm_set_lapic_tscdeadline_msr(vcpu, data);
2144 return 0;
2145 }
2146
2147 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
2148 {
2149 u32 msr = kvm_rcx_read(vcpu);
2150 u64 data;
2151 fastpath_t ret = EXIT_FASTPATH_NONE;
2152
2153 switch (msr) {
2154 case APIC_BASE_MSR + (APIC_ICR >> 4):
2155 data = kvm_read_edx_eax(vcpu);
2156 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
2157 kvm_skip_emulated_instruction(vcpu);
2158 ret = EXIT_FASTPATH_EXIT_HANDLED;
2159 }
2160 break;
2161 case MSR_IA32_TSC_DEADLINE:
2162 data = kvm_read_edx_eax(vcpu);
2163 if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
2164 kvm_skip_emulated_instruction(vcpu);
2165 ret = EXIT_FASTPATH_REENTER_GUEST;
2166 }
2167 break;
2168 default:
2169 break;
2170 }
2171
2172 if (ret != EXIT_FASTPATH_NONE)
2173 trace_kvm_msr_write(msr, data);
2174
2175 return ret;
2176 }
2177 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
2178
2179 /*
2180 * Adapt set_msr() to msr_io()'s calling convention
2181 */
2182 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2183 {
2184 return kvm_get_msr_ignored_check(vcpu, index, data, true);
2185 }
2186
2187 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2188 {
2189 return kvm_set_msr_ignored_check(vcpu, index, *data, true);
2190 }
2191
2192 #ifdef CONFIG_X86_64
2193 struct pvclock_clock {
2194 int vclock_mode;
2195 u64 cycle_last;
2196 u64 mask;
2197 u32 mult;
2198 u32 shift;
2199 u64 base_cycles;
2200 u64 offset;
2201 };
2202
2203 struct pvclock_gtod_data {
2204 seqcount_t seq;
2205
2206 struct pvclock_clock clock; /* extract of a clocksource struct */
2207 struct pvclock_clock raw_clock; /* extract of a clocksource struct */
2208
2209 ktime_t offs_boot;
2210 u64 wall_time_sec;
2211 };
2212
2213 static struct pvclock_gtod_data pvclock_gtod_data;
2214
2215 static void update_pvclock_gtod(struct timekeeper *tk)
2216 {
2217 struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
2218
2219 write_seqcount_begin(&vdata->seq);
2220
2221 /* copy pvclock gtod data */
2222 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode;
2223 vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
2224 vdata->clock.mask = tk->tkr_mono.mask;
2225 vdata->clock.mult = tk->tkr_mono.mult;
2226 vdata->clock.shift = tk->tkr_mono.shift;
2227 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec;
2228 vdata->clock.offset = tk->tkr_mono.base;
2229
2230 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode;
2231 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last;
2232 vdata->raw_clock.mask =
tk->tkr_raw.mask; 2233 vdata->raw_clock.mult = tk->tkr_raw.mult; 2234 vdata->raw_clock.shift = tk->tkr_raw.shift; 2235 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; 2236 vdata->raw_clock.offset = tk->tkr_raw.base; 2237 2238 vdata->wall_time_sec = tk->xtime_sec; 2239 2240 vdata->offs_boot = tk->offs_boot; 2241 2242 write_seqcount_end(&vdata->seq); 2243 } 2244 2245 static s64 get_kvmclock_base_ns(void) 2246 { 2247 /* Count up from boot time, but with the frequency of the raw clock. */ 2248 return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot)); 2249 } 2250 #else 2251 static s64 get_kvmclock_base_ns(void) 2252 { 2253 /* Master clock not used, so we can just use CLOCK_BOOTTIME. */ 2254 return ktime_get_boottime_ns(); 2255 } 2256 #endif 2257 2258 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs) 2259 { 2260 int version; 2261 int r; 2262 struct pvclock_wall_clock wc; 2263 u32 wc_sec_hi; 2264 u64 wall_nsec; 2265 2266 if (!wall_clock) 2267 return; 2268 2269 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); 2270 if (r) 2271 return; 2272 2273 if (version & 1) 2274 ++version; /* first time write, random junk */ 2275 2276 ++version; 2277 2278 if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version))) 2279 return; 2280 2281 /* 2282 * The guest calculates current wall clock time by adding 2283 * system time (updated by kvm_guest_time_update below) to the 2284 * wall clock specified here. We do the reverse here. 2285 */ 2286 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); 2287 2288 wc.nsec = do_div(wall_nsec, 1000000000); 2289 wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */ 2290 wc.version = version; 2291 2292 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); 2293 2294 if (sec_hi_ofs) { 2295 wc_sec_hi = wall_nsec >> 32; 2296 kvm_write_guest(kvm, wall_clock + sec_hi_ofs, 2297 &wc_sec_hi, sizeof(wc_sec_hi)); 2298 } 2299 2300 version++; 2301 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); 2302 } 2303 2304 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, 2305 bool old_msr, bool host_initiated) 2306 { 2307 struct kvm_arch *ka = &vcpu->kvm->arch; 2308 2309 if (vcpu->vcpu_id == 0 && !host_initiated) { 2310 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) 2311 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2312 2313 ka->boot_vcpu_runs_old_kvmclock = old_msr; 2314 } 2315 2316 vcpu->arch.time = system_time; 2317 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 2318 2319 /* we verify if the enable bit is set... 
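 * (bit 0 of the MSR value; the remaining bits, i.e. system_time & ~1ULL,
 * give the guest physical address of the pvclock_vcpu_time_info area)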
*/
2320 if (system_time & 1) {
2321 kvm_gpc_activate(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
2322 KVM_HOST_USES_PFN, system_time & ~1ULL,
2323 sizeof(struct pvclock_vcpu_time_info));
2324 } else {
2325 kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
2326 }
2327
2328 return;
2329 }
2330
2331 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
2332 {
2333 do_shl32_div32(dividend, divisor);
2334 return dividend;
2335 }
2336
2337 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
2338 s8 *pshift, u32 *pmultiplier)
2339 {
2340 uint64_t scaled64;
2341 int32_t shift = 0;
2342 uint64_t tps64;
2343 uint32_t tps32;
2344
2345 tps64 = base_hz;
2346 scaled64 = scaled_hz;
2347 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
2348 tps64 >>= 1;
2349 shift--;
2350 }
2351
2352 tps32 = (uint32_t)tps64;
2353 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
2354 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
2355 scaled64 >>= 1;
2356 else
2357 tps32 <<= 1;
2358 shift++;
2359 }
2360
2361 *pshift = shift;
2362 *pmultiplier = div_frac(scaled64, tps32);
2363 }
2364
2365 #ifdef CONFIG_X86_64
2366 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
2367 #endif
2368
2369 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
2370 static unsigned long max_tsc_khz;
2371
2372 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
2373 {
2374 u64 v = (u64)khz * (1000000 + ppm);
2375 do_div(v, 1000000);
2376 return v;
2377 }
2378
2379 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
2380
2381 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
2382 {
2383 u64 ratio;
2384
2385 /* Guest TSC same frequency as host TSC? */
2386 if (!scale) {
2387 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
2388 return 0;
2389 }
2390
2391 /* TSC scaling supported? */
2392 if (!kvm_caps.has_tsc_control) {
2393 if (user_tsc_khz > tsc_khz) {
2394 vcpu->arch.tsc_catchup = 1;
2395 vcpu->arch.tsc_always_catchup = 1;
2396 return 0;
2397 } else {
2398 pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
2399 return -1;
2400 }
2401 }
2402
2403 /* TSC scaling required - calculate ratio */
2404 ratio = mul_u64_u32_div(1ULL << kvm_caps.tsc_scaling_ratio_frac_bits,
2405 user_tsc_khz, tsc_khz);
2406
2407 if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) {
2408 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
2409 user_tsc_khz);
2410 return -1;
2411 }
2412
2413 kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
2414 return 0;
2415 }
2416
2417 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
2418 {
2419 u32 thresh_lo, thresh_hi;
2420 int use_scaling = 0;
2421
2422 /* tsc_khz can be zero if TSC calibration fails */
2423 if (user_tsc_khz == 0) {
2424 /* set tsc_scaling_ratio to a safe value */
2425 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
2426 return -1;
2427 }
2428
2429 /* Compute a scale to convert nanoseconds in TSC cycles */
2430 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
2431 &vcpu->arch.virtual_tsc_shift,
2432 &vcpu->arch.virtual_tsc_mult);
2433 vcpu->arch.virtual_tsc_khz = user_tsc_khz;
2434
2435 /*
2436 * Compute the variation in TSC rate which is acceptable
2437 * within the range of tolerance and decide if the
2438 * rate being applied is within those bounds of the hardware
2439 * rate. If so, no scaling or compensation need be done.
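 *
 * For example, assuming the default tsc_tolerance_ppm of 250 and a
 * host tsc_khz of 2000000: thresh_lo = 2000000 * 0.999750 = 1999500
 * and thresh_hi = 2000000 * 1.000250 = 2000500, so only a requested
 * rate outside [1999500, 2000500] kHz forces use_scaling = 1.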
2440 */
2441 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
2442 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
2443 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
2444 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
2445 use_scaling = 1;
2446 }
2447 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
2448 }
2449
2450 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
2451 {
2452 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
2453 vcpu->arch.virtual_tsc_mult,
2454 vcpu->arch.virtual_tsc_shift);
2455 tsc += vcpu->arch.this_tsc_write;
2456 return tsc;
2457 }
2458
2459 #ifdef CONFIG_X86_64
2460 static inline int gtod_is_based_on_tsc(int mode)
2461 {
2462 return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
2463 }
2464 #endif
2465
2466 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
2467 {
2468 #ifdef CONFIG_X86_64
2469 bool vcpus_matched;
2470 struct kvm_arch *ka = &vcpu->kvm->arch;
2471 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2472
2473 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2474 atomic_read(&vcpu->kvm->online_vcpus));
2475
2476 /*
2477 * Once the masterclock is enabled, always perform request in
2478 * order to update it.
2479 *
2480 * In order to enable masterclock, the host clocksource must be TSC
2481 * and the vcpus need to have matched TSCs. When that happens,
2482 * perform request to enable masterclock.
2483 */
2484 if (ka->use_master_clock ||
2485 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched))
2486 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2487
2488 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
2489 atomic_read(&vcpu->kvm->online_vcpus),
2490 ka->use_master_clock, gtod->clock.vclock_mode);
2491 #endif
2492 }
2493
2494 /*
2495 * Multiply tsc by a fixed point number represented by ratio.
2496 *
2497 * The most significant 64-N bits (mult) of ratio represent the
2498 * integral part of the fixed point number; the remaining N bits
2499 * (frac) represent the fractional part, i.e. ratio represents a fixed
2500 * point number (mult + frac * 2^(-N)).
2501 *
2502 * N equals kvm_caps.tsc_scaling_ratio_frac_bits.
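 *
 * For example, with N = 48 (the value used on VMX), a ratio of
 * 0x1800000000000 encodes 1.5: __scale_tsc() below computes
 * mul_u64_u64_shr(tsc, ratio, N), so a host delta of 1000 cycles
 * scales to (1000 * 0x1800000000000) >> 48 = 1500 guest cycles.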
2503 */
2504 static inline u64 __scale_tsc(u64 ratio, u64 tsc)
2505 {
2506 return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits);
2507 }
2508
2509 u64 kvm_scale_tsc(u64 tsc, u64 ratio)
2510 {
2511 u64 _tsc = tsc;
2512
2513 if (ratio != kvm_caps.default_tsc_scaling_ratio)
2514 _tsc = __scale_tsc(ratio, tsc);
2515
2516 return _tsc;
2517 }
2518 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
2519
2520 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
2521 {
2522 u64 tsc;
2523
2524 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
2525
2526 return target_tsc - tsc;
2527 }
2528
2529 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2530 {
2531 return vcpu->arch.l1_tsc_offset +
2532 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
2533 }
2534 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
2535
2536 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
2537 {
2538 u64 nested_offset;
2539
2540 if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio)
2541 nested_offset = l1_offset;
2542 else
2543 nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
2544 kvm_caps.tsc_scaling_ratio_frac_bits);
2545
2546 nested_offset += l2_offset;
2547 return nested_offset;
2548 }
2549 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);
2550
2551 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
2552 {
2553 if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio)
2554 return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
2555 kvm_caps.tsc_scaling_ratio_frac_bits);
2556
2557 return l1_multiplier;
2558 }
2559 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
2560
2561 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
2562 {
2563 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2564 vcpu->arch.l1_tsc_offset,
2565 l1_offset);
2566
2567 vcpu->arch.l1_tsc_offset = l1_offset;
2568
2569 /*
2570 * If we are here because L1 chose not to trap WRMSR to TSC then
2571 * according to the spec this should set L1's TSC (as opposed to
2572 * setting L1's offset for L2).
2573 */
2574 if (is_guest_mode(vcpu))
2575 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2576 l1_offset,
2577 static_call(kvm_x86_get_l2_tsc_offset)(vcpu),
2578 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
2579 else
2580 vcpu->arch.tsc_offset = l1_offset;
2581
2582 static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
2583 }
2584
2585 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
2586 {
2587 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
2588
2589 /* Userspace is changing the multiplier while L2 is active */
2590 if (is_guest_mode(vcpu))
2591 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2592 l1_multiplier,
2593 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
2594 else
2595 vcpu->arch.tsc_scaling_ratio = l1_multiplier;
2596
2597 if (kvm_caps.has_tsc_control)
2598 static_call(kvm_x86_write_tsc_multiplier)(
2599 vcpu, vcpu->arch.tsc_scaling_ratio);
2600 }
2601
2602 static inline bool kvm_check_tsc_unstable(void)
2603 {
2604 #ifdef CONFIG_X86_64
2605 /*
2606 * TSC is marked unstable when we're running on Hyper-V, but the
2607 * 'TSC page' clocksource is good.
2608 */
2609 if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
2610 return false;
2611 #endif
2612 return check_tsc_unstable();
2613 }
2614
2615 /*
2616 * Infers attempts to synchronize the guest's TSC from host writes. Sets the
2617 * offset for the vcpu and tracks the TSC matching generation that the vcpu
2618 * participates in.
2619 */
2620 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
2621 u64 ns, bool matched)
2622 {
2623 struct kvm *kvm = vcpu->kvm;
2624
2625 lockdep_assert_held(&kvm->arch.tsc_write_lock);
2626
2627 /*
2628 * We also track the most recent recorded kHz, write and time to
2629 * allow the matching interval to be extended at each write.
2630 */
2631 kvm->arch.last_tsc_nsec = ns;
2632 kvm->arch.last_tsc_write = tsc;
2633 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
2634 kvm->arch.last_tsc_offset = offset;
2635
2636 vcpu->arch.last_guest_tsc = tsc;
2637
2638 kvm_vcpu_write_tsc_offset(vcpu, offset);
2639
2640 if (!matched) {
2641 /*
2642 * We split periods of matched TSC writes into generations.
2643 * For each generation, we track the original measured
2644 * nanosecond time, offset, and write, so if TSCs are in
2645 * sync, we can match exact offset, and if not, we can match
2646 * exact software computation in compute_guest_tsc()
2647 *
2648 * These values are tracked in kvm->arch.cur_xxx variables.
2649 */
2650 kvm->arch.cur_tsc_generation++;
2651 kvm->arch.cur_tsc_nsec = ns;
2652 kvm->arch.cur_tsc_write = tsc;
2653 kvm->arch.cur_tsc_offset = offset;
2654 kvm->arch.nr_vcpus_matched_tsc = 0;
2655 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
2656 kvm->arch.nr_vcpus_matched_tsc++;
2657 }
2658
2659 /* Keep track of which generation this VCPU has synchronized to */
2660 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
2661 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
2662 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
2663
2664 kvm_track_tsc_matching(vcpu);
2665 }
2666
2667 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
2668 {
2669 struct kvm *kvm = vcpu->kvm;
2670 u64 offset, ns, elapsed;
2671 unsigned long flags;
2672 bool matched = false;
2673 bool synchronizing = false;
2674
2675 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
2676 offset = kvm_compute_l1_tsc_offset(vcpu, data);
2677 ns = get_kvmclock_base_ns();
2678 elapsed = ns - kvm->arch.last_tsc_nsec;
2679
2680 if (vcpu->arch.virtual_tsc_khz) {
2681 if (data == 0) {
2682 /*
2683 * detection of vcpu initialization -- need to sync
2684 * with other vCPUs. This particularly helps to keep
2685 * kvm_clock stable after CPU hotplug
2686 */
2687 synchronizing = true;
2688 } else {
2689 u64 tsc_exp = kvm->arch.last_tsc_write +
2690 nsec_to_cycles(vcpu, elapsed);
2691 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
2692 /*
2693 * Special case: TSC write with a small delta (1 second)
2694 * of virtual cycle time against real time is
2695 * interpreted as an attempt to synchronize the CPU.
2696 */
2697 synchronizing = data < tsc_exp + tsc_hz &&
2698 data + tsc_hz > tsc_exp;
2699 }
2700 }
2701
2702 /*
2703 * For a reliable TSC, we can match TSC offsets, and for an unstable
2704 * TSC, we add elapsed time in this computation. We could let the
2705 * compensation code attempt to catch up if we fall behind, but
2706 * it's better to try to match offsets from the beginning.
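 *
 * (Per the heuristic above, a write counts as a synchronization
 * attempt iff it lands within one second of virtual TSC time of the
 * expected value, i.e. tsc_exp - tsc_hz < data < tsc_exp + tsc_hz.)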
2707 */
2708 if (synchronizing &&
2709 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
2710 if (!kvm_check_tsc_unstable()) {
2711 offset = kvm->arch.cur_tsc_offset;
2712 } else {
2713 u64 delta = nsec_to_cycles(vcpu, elapsed);
2714 data += delta;
2715 offset = kvm_compute_l1_tsc_offset(vcpu, data);
2716 }
2717 matched = true;
2718 }
2719
2720 __kvm_synchronize_tsc(vcpu, offset, data, ns, matched);
2721 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
2722 }
2723
2724 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
2725 s64 adjustment)
2726 {
2727 u64 tsc_offset = vcpu->arch.l1_tsc_offset;
2728 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
2729 }
2730
2731 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
2732 {
2733 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio)
2734 WARN_ON(adjustment < 0);
2735 adjustment = kvm_scale_tsc((u64) adjustment,
2736 vcpu->arch.l1_tsc_scaling_ratio);
2737 adjust_tsc_offset_guest(vcpu, adjustment);
2738 }
2739
2740 #ifdef CONFIG_X86_64
2741
2742 static u64 read_tsc(void)
2743 {
2744 u64 ret = (u64)rdtsc_ordered();
2745 u64 last = pvclock_gtod_data.clock.cycle_last;
2746
2747 if (likely(ret >= last))
2748 return ret;
2749
2750 /*
2751 * GCC likes to generate cmov here, but this branch is extremely
2752 * predictable (it's just a function of time and the likely is
2753 * very likely) and there's a data dependence, so force GCC
2754 * to generate a branch instead. I don't barrier() because
2755 * we don't actually need a barrier, and if this function
2756 * ever gets inlined it will generate worse code.
2757 */
2758 asm volatile ("");
2759 return last;
2760 }
2761
2762 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
2763 int *mode)
2764 {
2765 long v;
2766 u64 tsc_pg_val;
2767
2768 switch (clock->vclock_mode) {
2769 case VDSO_CLOCKMODE_HVCLOCK:
2770 tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
2771 tsc_timestamp);
2772 if (tsc_pg_val != U64_MAX) {
2773 /* TSC page valid */
2774 *mode = VDSO_CLOCKMODE_HVCLOCK;
2775 v = (tsc_pg_val - clock->cycle_last) &
2776 clock->mask;
2777 } else {
2778 /* TSC page invalid */
2779 *mode = VDSO_CLOCKMODE_NONE;
2780 }
2781 break;
2782 case VDSO_CLOCKMODE_TSC:
2783 *mode = VDSO_CLOCKMODE_TSC;
2784 *tsc_timestamp = read_tsc();
2785 v = (*tsc_timestamp - clock->cycle_last) &
2786 clock->mask;
2787 break;
2788 default:
2789 *mode = VDSO_CLOCKMODE_NONE;
2790 }
2791
2792 if (*mode == VDSO_CLOCKMODE_NONE)
2793 *tsc_timestamp = v = 0;
2794
2795 return v * clock->mult;
2796 }
2797
2798 static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
2799 {
2800 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2801 unsigned long seq;
2802 int mode;
2803 u64 ns;
2804
2805 do {
2806 seq = read_seqcount_begin(&gtod->seq);
2807 ns = gtod->raw_clock.base_cycles;
2808 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
2809 ns >>= gtod->raw_clock.shift;
2810 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot));
2811 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2812 *t = ns;
2813
2814 return mode;
2815 }
2816
2817 static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
2818 {
2819 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2820 unsigned long seq;
2821 int mode;
2822 u64 ns;
2823
2824 do {
2825 seq = read_seqcount_begin(&gtod->seq);
2826 ts->tv_sec = gtod->wall_time_sec;
2827 ns = gtod->clock.base_cycles;
2828 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
2829 ns >>= gtod->clock.shift;
2830 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2831
2832 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
2833 ts->tv_nsec = ns;
2834
2835 return mode;
2836 }
2837
2838 /* returns true if host is using TSC based clocksource */
2839 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
2840 {
2841 /* checked again under seqlock below */
2842 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2843 return false;
2844
2845 return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns,
2846 tsc_timestamp));
2847 }
2848
2849 /* returns true if host is using TSC based clocksource */
2850 static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
2851 u64 *tsc_timestamp)
2852 {
2853 /* checked again under seqlock below */
2854 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2855 return false;
2856
2857 return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
2858 }
2859 #endif
2860
2861 /*
2862 *
2863 * Assuming a stable TSC across physical CPUs, and a stable TSC
2864 * across virtual CPUs, the following condition is possible.
2865 * Each numbered line represents an event visible to both
2866 * CPUs at the next numbered event.
2867 *
2868 * "timespecX" represents host monotonic time. "tscX" represents
2869 * RDTSC value.
2870 *
2871 * VCPU0 on CPU0 | VCPU1 on CPU1
2872 *
2873 * 1. read timespec0,tsc0
2874 * 2. | timespec1 = timespec0 + N
2875 * | tsc1 = tsc0 + M
2876 * 3. transition to guest | transition to guest
2877 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
2878 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
2879 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2880 *
2881 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
2882 *
2883 * - ret0 < ret1
2884 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2885 * ...
2886 * - 0 < N - M => M < N
2887 *
2888 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
2889 * always the case (the difference between two distinct xtime instances
2890 * might be smaller than the difference between corresponding TSC reads,
2891 * when updating guest vcpus pvclock areas).
2892 *
2893 * To avoid that problem, do not allow visibility of distinct
2894 * system_timestamp/tsc_timestamp values simultaneously: use a master
2895 * copy of host monotonic time values. Update that master copy
2896 * in lockstep.
2897 *
2898 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
2899 *
2900 */
2901
2902 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
2903 {
2904 #ifdef CONFIG_X86_64
2905 struct kvm_arch *ka = &kvm->arch;
2906 int vclock_mode;
2907 bool host_tsc_clocksource, vcpus_matched;
2908
2909 lockdep_assert_held(&kvm->arch.tsc_write_lock);
2910 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2911 atomic_read(&kvm->online_vcpus));
2912
2913 /*
2914 * If the host uses the TSC clocksource, then pass the TSC through
2915 * as stable to the guest.
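 * The (master_kernel_ns, master_cycle_now) pair captured below then
 * serves as the single reference point from which every vCPU's
 * kvmclock is derived.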
2916 */ 2917 host_tsc_clocksource = kvm_get_time_and_clockread( 2918 &ka->master_kernel_ns, 2919 &ka->master_cycle_now); 2920 2921 ka->use_master_clock = host_tsc_clocksource && vcpus_matched 2922 && !ka->backwards_tsc_observed 2923 && !ka->boot_vcpu_runs_old_kvmclock; 2924 2925 if (ka->use_master_clock) 2926 atomic_set(&kvm_guest_has_master_clock, 1); 2927 2928 vclock_mode = pvclock_gtod_data.clock.vclock_mode; 2929 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, 2930 vcpus_matched); 2931 #endif 2932 } 2933 2934 static void kvm_make_mclock_inprogress_request(struct kvm *kvm) 2935 { 2936 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); 2937 } 2938 2939 static void __kvm_start_pvclock_update(struct kvm *kvm) 2940 { 2941 raw_spin_lock_irq(&kvm->arch.tsc_write_lock); 2942 write_seqcount_begin(&kvm->arch.pvclock_sc); 2943 } 2944 2945 static void kvm_start_pvclock_update(struct kvm *kvm) 2946 { 2947 kvm_make_mclock_inprogress_request(kvm); 2948 2949 /* no guest entries from this point */ 2950 __kvm_start_pvclock_update(kvm); 2951 } 2952 2953 static void kvm_end_pvclock_update(struct kvm *kvm) 2954 { 2955 struct kvm_arch *ka = &kvm->arch; 2956 struct kvm_vcpu *vcpu; 2957 unsigned long i; 2958 2959 write_seqcount_end(&ka->pvclock_sc); 2960 raw_spin_unlock_irq(&ka->tsc_write_lock); 2961 kvm_for_each_vcpu(i, vcpu, kvm) 2962 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2963 2964 /* guest entries allowed */ 2965 kvm_for_each_vcpu(i, vcpu, kvm) 2966 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); 2967 } 2968 2969 static void kvm_update_masterclock(struct kvm *kvm) 2970 { 2971 kvm_hv_request_tsc_page_update(kvm); 2972 kvm_start_pvclock_update(kvm); 2973 pvclock_update_vm_gtod_copy(kvm); 2974 kvm_end_pvclock_update(kvm); 2975 } 2976 2977 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. 
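 * Callers are responsible for retrying until the seqcount is stable;
 * see the read_seqcount_retry() loop in get_kvmclock() below.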
*/ 2978 static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 2979 { 2980 struct kvm_arch *ka = &kvm->arch; 2981 struct pvclock_vcpu_time_info hv_clock; 2982 2983 /* both __this_cpu_read() and rdtsc() should be on the same cpu */ 2984 get_cpu(); 2985 2986 data->flags = 0; 2987 if (ka->use_master_clock && __this_cpu_read(cpu_tsc_khz)) { 2988 #ifdef CONFIG_X86_64 2989 struct timespec64 ts; 2990 2991 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) { 2992 data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec; 2993 data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC; 2994 } else 2995 #endif 2996 data->host_tsc = rdtsc(); 2997 2998 data->flags |= KVM_CLOCK_TSC_STABLE; 2999 hv_clock.tsc_timestamp = ka->master_cycle_now; 3000 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 3001 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, 3002 &hv_clock.tsc_shift, 3003 &hv_clock.tsc_to_system_mul); 3004 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc); 3005 } else { 3006 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; 3007 } 3008 3009 put_cpu(); 3010 } 3011 3012 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 3013 { 3014 struct kvm_arch *ka = &kvm->arch; 3015 unsigned seq; 3016 3017 do { 3018 seq = read_seqcount_begin(&ka->pvclock_sc); 3019 __get_kvmclock(kvm, data); 3020 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 3021 } 3022 3023 u64 get_kvmclock_ns(struct kvm *kvm) 3024 { 3025 struct kvm_clock_data data; 3026 3027 get_kvmclock(kvm, &data); 3028 return data.clock; 3029 } 3030 3031 static void kvm_setup_guest_pvclock(struct kvm_vcpu *v, 3032 struct gfn_to_pfn_cache *gpc, 3033 unsigned int offset) 3034 { 3035 struct kvm_vcpu_arch *vcpu = &v->arch; 3036 struct pvclock_vcpu_time_info *guest_hv_clock; 3037 unsigned long flags; 3038 3039 read_lock_irqsave(&gpc->lock, flags); 3040 while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, 3041 offset + sizeof(*guest_hv_clock))) { 3042 read_unlock_irqrestore(&gpc->lock, flags); 3043 3044 if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, 3045 offset + sizeof(*guest_hv_clock))) 3046 return; 3047 3048 read_lock_irqsave(&gpc->lock, flags); 3049 } 3050 3051 guest_hv_clock = (void *)(gpc->khva + offset); 3052 3053 /* 3054 * This VCPU is paused, but it's legal for a guest to read another 3055 * VCPU's kvmclock, so we really have to follow the specification where 3056 * it says that version is odd if data is being modified, and even after 3057 * it is consistent. 
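 *
 * A conforming guest reader therefore does something like this
 * (an illustrative sketch, not any particular guest's code):
 *
 *	do {
 *		version = clock->version;
 *		rmb();
 *		... copy tsc_timestamp, system_time, mul, shift ...
 *		rmb();
 *	} while ((version & 1) || (version != clock->version));
 *
 * which pairs with the odd/even version updates and the two
 * smp_wmb() calls below.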
3058 */ 3059 3060 guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1; 3061 smp_wmb(); 3062 3063 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 3064 vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); 3065 3066 if (vcpu->pvclock_set_guest_stopped_request) { 3067 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; 3068 vcpu->pvclock_set_guest_stopped_request = false; 3069 } 3070 3071 memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock)); 3072 smp_wmb(); 3073 3074 guest_hv_clock->version = ++vcpu->hv_clock.version; 3075 3076 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); 3077 read_unlock_irqrestore(&gpc->lock, flags); 3078 3079 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); 3080 } 3081 3082 static int kvm_guest_time_update(struct kvm_vcpu *v) 3083 { 3084 unsigned long flags, tgt_tsc_khz; 3085 unsigned seq; 3086 struct kvm_vcpu_arch *vcpu = &v->arch; 3087 struct kvm_arch *ka = &v->kvm->arch; 3088 s64 kernel_ns; 3089 u64 tsc_timestamp, host_tsc; 3090 u8 pvclock_flags; 3091 bool use_master_clock; 3092 3093 kernel_ns = 0; 3094 host_tsc = 0; 3095 3096 /* 3097 * If the host uses TSC clock, then passthrough TSC as stable 3098 * to the guest. 3099 */ 3100 do { 3101 seq = read_seqcount_begin(&ka->pvclock_sc); 3102 use_master_clock = ka->use_master_clock; 3103 if (use_master_clock) { 3104 host_tsc = ka->master_cycle_now; 3105 kernel_ns = ka->master_kernel_ns; 3106 } 3107 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 3108 3109 /* Keep irq disabled to prevent changes to the clock */ 3110 local_irq_save(flags); 3111 tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz); 3112 if (unlikely(tgt_tsc_khz == 0)) { 3113 local_irq_restore(flags); 3114 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3115 return 1; 3116 } 3117 if (!use_master_clock) { 3118 host_tsc = rdtsc(); 3119 kernel_ns = get_kvmclock_base_ns(); 3120 } 3121 3122 tsc_timestamp = kvm_read_l1_tsc(v, host_tsc); 3123 3124 /* 3125 * We may have to catch up the TSC to match elapsed wall clock 3126 * time for two reasons, even if kvmclock is used. 3127 * 1) CPU could have been running below the maximum TSC rate 3128 * 2) Broken TSC compensation resets the base at each VCPU 3129 * entry to avoid unknown leaps of TSC even when running 3130 * again on the same CPU. This may cause apparent elapsed 3131 * time to disappear, and the guest to stand still or run 3132 * very slowly. 
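 *
 * The catchup path below therefore compares the guest TSC value that
 * should have been reached by now, compute_guest_tsc(kernel_ns), with
 * the value implied by the host TSC, and pushes the TSC offset forward
 * when the latter lags.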
3133 */ 3134 if (vcpu->tsc_catchup) { 3135 u64 tsc = compute_guest_tsc(v, kernel_ns); 3136 if (tsc > tsc_timestamp) { 3137 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); 3138 tsc_timestamp = tsc; 3139 } 3140 } 3141 3142 local_irq_restore(flags); 3143 3144 /* With all the info we got, fill in the values */ 3145 3146 if (kvm_caps.has_tsc_control) 3147 tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz, 3148 v->arch.l1_tsc_scaling_ratio); 3149 3150 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { 3151 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, 3152 &vcpu->hv_clock.tsc_shift, 3153 &vcpu->hv_clock.tsc_to_system_mul); 3154 vcpu->hw_tsc_khz = tgt_tsc_khz; 3155 } 3156 3157 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; 3158 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; 3159 vcpu->last_guest_tsc = tsc_timestamp; 3160 3161 /* If the host uses TSC clocksource, then it is stable */ 3162 pvclock_flags = 0; 3163 if (use_master_clock) 3164 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; 3165 3166 vcpu->hv_clock.flags = pvclock_flags; 3167 3168 if (vcpu->pv_time.active) 3169 kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0); 3170 if (vcpu->xen.vcpu_info_cache.active) 3171 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache, 3172 offsetof(struct compat_vcpu_info, time)); 3173 if (vcpu->xen.vcpu_time_info_cache.active) 3174 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0); 3175 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); 3176 return 0; 3177 } 3178 3179 /* 3180 * kvmclock updates which are isolated to a given vcpu, such as 3181 * vcpu->cpu migration, should not allow system_timestamp from 3182 * the rest of the vcpus to remain static. Otherwise ntp frequency 3183 * correction applies to one vcpu's system_timestamp but not 3184 * the others. 3185 * 3186 * So in those cases, request a kvmclock update for all vcpus. 3187 * We need to rate-limit these requests though, as they can 3188 * considerably slow guests that have a large number of vcpus. 3189 * The time for a remote vcpu to update its kvmclock is bound 3190 * by the delay we use to rate-limit the updates. 3191 */ 3192 3193 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) 3194 3195 static void kvmclock_update_fn(struct work_struct *work) 3196 { 3197 unsigned long i; 3198 struct delayed_work *dwork = to_delayed_work(work); 3199 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3200 kvmclock_update_work); 3201 struct kvm *kvm = container_of(ka, struct kvm, arch); 3202 struct kvm_vcpu *vcpu; 3203 3204 kvm_for_each_vcpu(i, vcpu, kvm) { 3205 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3206 kvm_vcpu_kick(vcpu); 3207 } 3208 } 3209 3210 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) 3211 { 3212 struct kvm *kvm = v->kvm; 3213 3214 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3215 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 3216 KVMCLOCK_UPDATE_DELAY); 3217 } 3218 3219 #define KVMCLOCK_SYNC_PERIOD (300 * HZ) 3220 3221 static void kvmclock_sync_fn(struct work_struct *work) 3222 { 3223 struct delayed_work *dwork = to_delayed_work(work); 3224 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3225 kvmclock_sync_work); 3226 struct kvm *kvm = container_of(ka, struct kvm, arch); 3227 3228 if (!kvmclock_periodic_sync) 3229 return; 3230 3231 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); 3232 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 3233 KVMCLOCK_SYNC_PERIOD); 3234 } 3235 3236 /* These helpers are safe iff @msr is known to be an MCx bank MSR. 
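 * Each MCE bank spans four consecutive MSRs starting at
 * MSR_IA32_MC0_CTL (CTL, STATUS, ADDR, MISC), so (msr & 3) selects the
 * register within its bank: 0 is MCi_CTL and 1 is MCi_STATUS.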
*/
3237 static bool is_mci_control_msr(u32 msr)
3238 {
3239 return (msr & 3) == 0;
3240 }
3241 static bool is_mci_status_msr(u32 msr)
3242 {
3243 return (msr & 3) == 1;
3244 }
3245
3246 /*
3247 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
3248 */
3249 static bool can_set_mci_status(struct kvm_vcpu *vcpu)
3250 {
3251 /* McStatusWrEn enabled? */
3252 if (guest_cpuid_is_amd_or_hygon(vcpu))
3253 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
3254
3255 return false;
3256 }
3257
3258 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3259 {
3260 u64 mcg_cap = vcpu->arch.mcg_cap;
3261 unsigned bank_num = mcg_cap & 0xff;
3262 u32 msr = msr_info->index;
3263 u64 data = msr_info->data;
3264 u32 offset, last_msr;
3265
3266 switch (msr) {
3267 case MSR_IA32_MCG_STATUS:
3268 vcpu->arch.mcg_status = data;
3269 break;
3270 case MSR_IA32_MCG_CTL:
3271 if (!(mcg_cap & MCG_CTL_P) &&
3272 (data || !msr_info->host_initiated))
3273 return 1;
3274 if (data != 0 && data != ~(u64)0)
3275 return 1;
3276 vcpu->arch.mcg_ctl = data;
3277 break;
3278 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
3279 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
3280 if (msr > last_msr)
3281 return 1;
3282
3283 if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated))
3284 return 1;
3285 /* An attempt to write a 1 to a reserved bit raises #GP */
3286 if (data & ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK))
3287 return 1;
3288 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
3289 last_msr + 1 - MSR_IA32_MC0_CTL2);
3290 vcpu->arch.mci_ctl2_banks[offset] = data;
3291 break;
3292 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3293 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1;
3294 if (msr > last_msr)
3295 return 1;
3296
3297 /*
3298 * Only 0 or all 1s can be written to IA32_MCi_CTL, all other
3299 * values are architecturally undefined. But, some Linux
3300 * kernels clear bit 10 in bank 4 to work around a BIOS/GART TLB
3301 * issue on AMD K8s, so allow bit 10 to be clear when setting all
3302 * other bits in order to avoid an uncaught #GP in the guest.
3303 *
3304 * UNIXWARE clears bit 0 of MC1_CTL to ignore correctable,
3305 * single-bit ECC data errors.
3306 */
3307 if (is_mci_control_msr(msr) &&
3308 data != 0 && (data | (1 << 10) | 1) != ~(u64)0)
3309 return 1;
3310
3311 /*
3312 * All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR.
3313 * AMD-based CPUs allow non-zero values, but if and only if
3314 * HWCR[McStatusWrEn] is set.
3315 */
3316 if (!msr_info->host_initiated && is_mci_status_msr(msr) &&
3317 data != 0 && !can_set_mci_status(vcpu))
3318 return 1;
3319
3320 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
3321 last_msr + 1 - MSR_IA32_MC0_CTL);
3322 vcpu->arch.mce_banks[offset] = data;
3323 break;
3324 default:
3325 return 1;
3326 }
3327 return 0;
3328 }
3329
3330 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
3331 {
3332 u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
3333
3334 return (vcpu->arch.apf.msr_en_val & mask) == mask;
3335 }
3336
3337 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
3338 {
3339 gpa_t gpa = data & ~0x3f;
3340
3341 /* Bits 4:5 are reserved, should be zero */
3342 if (data & 0x30)
3343 return 1;
3344
3345 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) &&
3346 (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT))
3347 return 1;
3348
3349 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) &&
3350 (data & KVM_ASYNC_PF_DELIVERY_AS_INT))
3351 return 1;
3352
3353 if (!lapic_in_kernel(vcpu))
3354 return data ? 1 : 0;
3355
3356 vcpu->arch.apf.msr_en_val = data;
3357
3358 if (!kvm_pv_async_pf_enabled(vcpu)) {
3359 kvm_clear_async_pf_completion_queue(vcpu);
3360 kvm_async_pf_hash_reset(vcpu);
3361 return 0;
3362 }
3363
3364 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
3365 sizeof(u64)))
3366 return 1;
3367
3368 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
3369 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
3370
3371 kvm_async_pf_wakeup_all(vcpu);
3372
3373 return 0;
3374 }
3375
3376 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
3377 {
3378 /* Bits 8-63 are reserved */
3379 if (data >> 8)
3380 return 1;
3381
3382 if (!lapic_in_kernel(vcpu))
3383 return 1;
3384
3385 vcpu->arch.apf.msr_int_val = data;
3386
3387 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK;
3388
3389 return 0;
3390 }
3391
3392 static void kvmclock_reset(struct kvm_vcpu *vcpu)
3393 {
3394 kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
3395 vcpu->arch.time = 0;
3396 }
3397
3398 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
3399 {
3400 ++vcpu->stat.tlb_flush;
3401 static_call(kvm_x86_flush_tlb_all)(vcpu);
3402 }
3403
3404 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
3405 {
3406 ++vcpu->stat.tlb_flush;
3407
3408 if (!tdp_enabled) {
3409 /*
3410 * A TLB flush on behalf of the guest is equivalent to
3411 * INVPCID(all), toggling CR4.PGE, etc., which requires
3412 * a forced sync of the shadow page tables. Ensure all the
3413 * roots are synced and the guest TLB in hardware is clean.
3414 */
3415 kvm_mmu_sync_roots(vcpu);
3416 kvm_mmu_sync_prev_roots(vcpu);
3417 }
3418
3419 static_call(kvm_x86_flush_tlb_guest)(vcpu);
3420 }
3421
3422
3423 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
3424 {
3425 ++vcpu->stat.tlb_flush;
3426 static_call(kvm_x86_flush_tlb_current)(vcpu);
3427 }
3428
3429 /*
3430 * Service "local" TLB flush requests, which are specific to the current MMU
3431 * context. In addition to the generic event handling in vcpu_enter_guest(),
3432 * TLB flushes that are targeted at an MMU context also need to be serviced
3433 * prior to nested VM-Enter/VM-Exit.
3434 */ 3435 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu) 3436 { 3437 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 3438 kvm_vcpu_flush_tlb_current(vcpu); 3439 3440 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) 3441 kvm_vcpu_flush_tlb_guest(vcpu); 3442 } 3443 EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests); 3444 3445 static void record_steal_time(struct kvm_vcpu *vcpu) 3446 { 3447 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 3448 struct kvm_steal_time __user *st; 3449 struct kvm_memslots *slots; 3450 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; 3451 u64 steal; 3452 u32 version; 3453 3454 if (kvm_xen_msr_enabled(vcpu->kvm)) { 3455 kvm_xen_runstate_set_running(vcpu); 3456 return; 3457 } 3458 3459 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 3460 return; 3461 3462 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) 3463 return; 3464 3465 slots = kvm_memslots(vcpu->kvm); 3466 3467 if (unlikely(slots->generation != ghc->generation || 3468 gpa != ghc->gpa || 3469 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) { 3470 /* We rely on the fact that it fits in a single page. */ 3471 BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS); 3472 3473 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) || 3474 kvm_is_error_hva(ghc->hva) || !ghc->memslot) 3475 return; 3476 } 3477 3478 st = (struct kvm_steal_time __user *)ghc->hva; 3479 /* 3480 * Doing a TLB flush here, on the guest's behalf, can avoid 3481 * expensive IPIs. 3482 */ 3483 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { 3484 u8 st_preempted = 0; 3485 int err = -EFAULT; 3486 3487 if (!user_access_begin(st, sizeof(*st))) 3488 return; 3489 3490 asm volatile("1: xchgb %0, %2\n" 3491 "xor %1, %1\n" 3492 "2:\n" 3493 _ASM_EXTABLE_UA(1b, 2b) 3494 : "+q" (st_preempted), 3495 "+&r" (err), 3496 "+m" (st->preempted)); 3497 if (err) 3498 goto out; 3499 3500 user_access_end(); 3501 3502 vcpu->arch.st.preempted = 0; 3503 3504 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, 3505 st_preempted & KVM_VCPU_FLUSH_TLB); 3506 if (st_preempted & KVM_VCPU_FLUSH_TLB) 3507 kvm_vcpu_flush_tlb_guest(vcpu); 3508 3509 if (!user_access_begin(st, sizeof(*st))) 3510 goto dirty; 3511 } else { 3512 if (!user_access_begin(st, sizeof(*st))) 3513 return; 3514 3515 unsafe_put_user(0, &st->preempted, out); 3516 vcpu->arch.st.preempted = 0; 3517 } 3518 3519 unsafe_get_user(version, &st->version, out); 3520 if (version & 1) 3521 version += 1; /* first time write, random junk */ 3522 3523 version += 1; 3524 unsafe_put_user(version, &st->version, out); 3525 3526 smp_wmb(); 3527 3528 unsafe_get_user(steal, &st->steal, out); 3529 steal += current->sched_info.run_delay - 3530 vcpu->arch.st.last_steal; 3531 vcpu->arch.st.last_steal = current->sched_info.run_delay; 3532 unsafe_put_user(steal, &st->steal, out); 3533 3534 version += 1; 3535 unsafe_put_user(version, &st->version, out); 3536 3537 out: 3538 user_access_end(); 3539 dirty: 3540 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 3541 } 3542 3543 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3544 { 3545 bool pr = false; 3546 u32 msr = msr_info->index; 3547 u64 data = msr_info->data; 3548 3549 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) 3550 return kvm_xen_write_hypercall_page(vcpu, data); 3551 3552 switch (msr) { 3553 case MSR_AMD64_NB_CFG: 3554 case MSR_IA32_UCODE_WRITE: 3555 case MSR_VM_HSAVE_PA: 3556 case MSR_AMD64_PATCH_LOADER: 3557 case MSR_AMD64_BU_CFG2: 3558 case MSR_AMD64_DC_CFG: 3559 case 
MSR_F15H_EX_CFG:
3560 break;
3561
3562 case MSR_IA32_UCODE_REV:
3563 if (msr_info->host_initiated)
3564 vcpu->arch.microcode_version = data;
3565 break;
3566 case MSR_IA32_ARCH_CAPABILITIES:
3567 if (!msr_info->host_initiated)
3568 return 1;
3569 vcpu->arch.arch_capabilities = data;
3570 break;
3571 case MSR_IA32_PERF_CAPABILITIES:
3572 if (!msr_info->host_initiated)
3573 return 1;
3574 if (data & ~kvm_caps.supported_perf_cap)
3575 return 1;
3576
3577 vcpu->arch.perf_capabilities = data;
3578 kvm_pmu_refresh(vcpu);
3579 return 0;
3580 case MSR_EFER:
3581 return set_efer(vcpu, msr_info);
3582 case MSR_K7_HWCR:
3583 data &= ~(u64)0x40; /* ignore flush filter disable */
3584 data &= ~(u64)0x100; /* ignore ignne emulation enable */
3585 data &= ~(u64)0x8; /* ignore TLB cache disable */
3586
3587 /* Handle McStatusWrEn */
3588 if (data == BIT_ULL(18)) {
3589 vcpu->arch.msr_hwcr = data;
3590 } else if (data != 0) {
3591 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
3592 data);
3593 return 1;
3594 }
3595 break;
3596 case MSR_FAM10H_MMIO_CONF_BASE:
3597 if (data != 0) {
3598 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
3599 "0x%llx\n", data);
3600 return 1;
3601 }
3602 break;
3603 case 0x200 ... MSR_IA32_MC0_CTL2 - 1:
3604 case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff:
3605 return kvm_mtrr_set_msr(vcpu, msr, data);
3606 case MSR_IA32_APICBASE:
3607 return kvm_set_apic_base(vcpu, msr_info);
3608 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
3609 return kvm_x2apic_msr_write(vcpu, msr, data);
3610 case MSR_IA32_TSC_DEADLINE:
3611 kvm_set_lapic_tscdeadline_msr(vcpu, data);
3612 break;
3613 case MSR_IA32_TSC_ADJUST:
3614 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
3615 if (!msr_info->host_initiated) {
3616 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
3617 adjust_tsc_offset_guest(vcpu, adj);
3618 /* Before returning to the guest, tsc_timestamp must be
3619 * adjusted as well; otherwise the guest's percpu pvclock
3620 * time could jump. */
3621 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3622 }
3623 vcpu->arch.ia32_tsc_adjust_msr = data;
3624 }
3625 break;
3626 case MSR_IA32_MISC_ENABLE: {
3627 u64 old_val = vcpu->arch.ia32_misc_enable_msr;
3628
3629 if (!msr_info->host_initiated) {
3630 /* RO bits */
3631 if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK)
3632 return 1;
3633
3634 /* R bits, i.e. writes are ignored, but don't fault.
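 * (MSR_IA32_MISC_ENABLE_EMON is handled this way just below: the
 * incoming bit is masked off and the current value is carried over.)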
*/ 3635 data = data & ~MSR_IA32_MISC_ENABLE_EMON; 3636 data |= old_val & MSR_IA32_MISC_ENABLE_EMON; 3637 } 3638 3639 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && 3640 ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { 3641 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3)) 3642 return 1; 3643 vcpu->arch.ia32_misc_enable_msr = data; 3644 kvm_update_cpuid_runtime(vcpu); 3645 } else { 3646 vcpu->arch.ia32_misc_enable_msr = data; 3647 } 3648 break; 3649 } 3650 case MSR_IA32_SMBASE: 3651 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) 3652 return 1; 3653 vcpu->arch.smbase = data; 3654 break; 3655 case MSR_IA32_POWER_CTL: 3656 vcpu->arch.msr_ia32_power_ctl = data; 3657 break; 3658 case MSR_IA32_TSC: 3659 if (msr_info->host_initiated) { 3660 kvm_synchronize_tsc(vcpu, data); 3661 } else { 3662 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; 3663 adjust_tsc_offset_guest(vcpu, adj); 3664 vcpu->arch.ia32_tsc_adjust_msr += adj; 3665 } 3666 break; 3667 case MSR_IA32_XSS: 3668 if (!msr_info->host_initiated && 3669 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 3670 return 1; 3671 /* 3672 * KVM supports exposing PT to the guest, but does not support 3673 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than 3674 * XSAVES/XRSTORS to save/restore PT MSRs. 3675 */ 3676 if (data & ~kvm_caps.supported_xss) 3677 return 1; 3678 vcpu->arch.ia32_xss = data; 3679 kvm_update_cpuid_runtime(vcpu); 3680 break; 3681 case MSR_SMI_COUNT: 3682 if (!msr_info->host_initiated) 3683 return 1; 3684 vcpu->arch.smi_count = data; 3685 break; 3686 case MSR_KVM_WALL_CLOCK_NEW: 3687 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3688 return 1; 3689 3690 vcpu->kvm->arch.wall_clock = data; 3691 kvm_write_wall_clock(vcpu->kvm, data, 0); 3692 break; 3693 case MSR_KVM_WALL_CLOCK: 3694 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3695 return 1; 3696 3697 vcpu->kvm->arch.wall_clock = data; 3698 kvm_write_wall_clock(vcpu->kvm, data, 0); 3699 break; 3700 case MSR_KVM_SYSTEM_TIME_NEW: 3701 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3702 return 1; 3703 3704 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); 3705 break; 3706 case MSR_KVM_SYSTEM_TIME: 3707 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3708 return 1; 3709 3710 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); 3711 break; 3712 case MSR_KVM_ASYNC_PF_EN: 3713 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 3714 return 1; 3715 3716 if (kvm_pv_enable_async_pf(vcpu, data)) 3717 return 1; 3718 break; 3719 case MSR_KVM_ASYNC_PF_INT: 3720 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3721 return 1; 3722 3723 if (kvm_pv_enable_async_pf_int(vcpu, data)) 3724 return 1; 3725 break; 3726 case MSR_KVM_ASYNC_PF_ACK: 3727 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3728 return 1; 3729 if (data & 0x1) { 3730 vcpu->arch.apf.pageready_pending = false; 3731 kvm_check_async_pf_completion(vcpu); 3732 } 3733 break; 3734 case MSR_KVM_STEAL_TIME: 3735 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 3736 return 1; 3737 3738 if (unlikely(!sched_info_on())) 3739 return 1; 3740 3741 if (data & KVM_STEAL_RESERVED_MASK) 3742 return 1; 3743 3744 vcpu->arch.st.msr_val = data; 3745 3746 if (!(data & KVM_MSR_ENABLED)) 3747 break; 3748 3749 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 3750 3751 break; 3752 case MSR_KVM_PV_EOI_EN: 3753 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 3754 return 1; 3755 3756 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8))) 3757 return 1; 3758 break; 
3759 3760 case MSR_KVM_POLL_CONTROL: 3761 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 3762 return 1; 3763 3764 /* only enable bit supported */ 3765 if (data & (-1ULL << 1)) 3766 return 1; 3767 3768 vcpu->arch.msr_kvm_poll_control = data; 3769 break; 3770 3771 case MSR_IA32_MCG_CTL: 3772 case MSR_IA32_MCG_STATUS: 3773 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3774 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3775 return set_msr_mce(vcpu, msr_info); 3776 3777 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3778 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3779 pr = true; 3780 fallthrough; 3781 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3782 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3783 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3784 return kvm_pmu_set_msr(vcpu, msr_info); 3785 3786 if (pr || data != 0) 3787 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " 3788 "0x%x data 0x%llx\n", msr, data); 3789 break; 3790 case MSR_K7_CLK_CTL: 3791 /* 3792 * Ignore all writes to this no longer documented MSR. 3793 * Writes are only relevant for old K7 processors, 3794 * all pre-dating SVM, but a recommended workaround from 3795 * AMD for these chips. It is possible to specify the 3796 * affected processor models on the command line, hence 3797 * the need to ignore the workaround. 3798 */ 3799 break; 3800 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 3801 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 3802 case HV_X64_MSR_SYNDBG_OPTIONS: 3803 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 3804 case HV_X64_MSR_CRASH_CTL: 3805 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 3806 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 3807 case HV_X64_MSR_TSC_EMULATION_CONTROL: 3808 case HV_X64_MSR_TSC_EMULATION_STATUS: 3809 return kvm_hv_set_msr_common(vcpu, msr, data, 3810 msr_info->host_initiated); 3811 case MSR_IA32_BBL_CR_CTL3: 3812 /* Drop writes to this legacy MSR -- see rdmsr 3813 * counterpart for further detail. 
3814 */ 3815 if (report_ignored_msrs) 3816 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", 3817 msr, data); 3818 break; 3819 case MSR_AMD64_OSVW_ID_LENGTH: 3820 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3821 return 1; 3822 vcpu->arch.osvw.length = data; 3823 break; 3824 case MSR_AMD64_OSVW_STATUS: 3825 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3826 return 1; 3827 vcpu->arch.osvw.status = data; 3828 break; 3829 case MSR_PLATFORM_INFO: 3830 if (!msr_info->host_initiated || 3831 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) && 3832 cpuid_fault_enabled(vcpu))) 3833 return 1; 3834 vcpu->arch.msr_platform_info = data; 3835 break; 3836 case MSR_MISC_FEATURES_ENABLES: 3837 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT || 3838 (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT && 3839 !supports_cpuid_fault(vcpu))) 3840 return 1; 3841 vcpu->arch.msr_misc_features_enables = data; 3842 break; 3843 #ifdef CONFIG_X86_64 3844 case MSR_IA32_XFD: 3845 if (!msr_info->host_initiated && 3846 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 3847 return 1; 3848 3849 if (data & ~kvm_guest_supported_xfd(vcpu)) 3850 return 1; 3851 3852 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); 3853 break; 3854 case MSR_IA32_XFD_ERR: 3855 if (!msr_info->host_initiated && 3856 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 3857 return 1; 3858 3859 if (data & ~kvm_guest_supported_xfd(vcpu)) 3860 return 1; 3861 3862 vcpu->arch.guest_fpu.xfd_err = data; 3863 break; 3864 #endif 3865 case MSR_IA32_PEBS_ENABLE: 3866 case MSR_IA32_DS_AREA: 3867 case MSR_PEBS_DATA_CFG: 3868 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: 3869 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3870 return kvm_pmu_set_msr(vcpu, msr_info); 3871 /* 3872 * Userspace is allowed to write '0' to MSRs that KVM reports 3873 * as to-be-saved, even if an MSRs isn't fully supported. 3874 */ 3875 return !msr_info->host_initiated || data; 3876 default: 3877 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3878 return kvm_pmu_set_msr(vcpu, msr_info); 3879 return KVM_MSR_RET_INVALID; 3880 } 3881 return 0; 3882 } 3883 EXPORT_SYMBOL_GPL(kvm_set_msr_common); 3884 3885 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) 3886 { 3887 u64 data; 3888 u64 mcg_cap = vcpu->arch.mcg_cap; 3889 unsigned bank_num = mcg_cap & 0xff; 3890 u32 offset, last_msr; 3891 3892 switch (msr) { 3893 case MSR_IA32_P5_MC_ADDR: 3894 case MSR_IA32_P5_MC_TYPE: 3895 data = 0; 3896 break; 3897 case MSR_IA32_MCG_CAP: 3898 data = vcpu->arch.mcg_cap; 3899 break; 3900 case MSR_IA32_MCG_CTL: 3901 if (!(mcg_cap & MCG_CTL_P) && !host) 3902 return 1; 3903 data = vcpu->arch.mcg_ctl; 3904 break; 3905 case MSR_IA32_MCG_STATUS: 3906 data = vcpu->arch.mcg_status; 3907 break; 3908 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3909 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; 3910 if (msr > last_msr) 3911 return 1; 3912 3913 if (!(mcg_cap & MCG_CMCI_P) && !host) 3914 return 1; 3915 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, 3916 last_msr + 1 - MSR_IA32_MC0_CTL2); 3917 data = vcpu->arch.mci_ctl2_banks[offset]; 3918 break; 3919 case MSR_IA32_MC0_CTL ... 
MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3920 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; 3921 if (msr > last_msr) 3922 return 1; 3923 3924 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, 3925 last_msr + 1 - MSR_IA32_MC0_CTL); 3926 data = vcpu->arch.mce_banks[offset]; 3927 break; 3928 default: 3929 return 1; 3930 } 3931 *pdata = data; 3932 return 0; 3933 } 3934 3935 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3936 { 3937 switch (msr_info->index) { 3938 case MSR_IA32_PLATFORM_ID: 3939 case MSR_IA32_EBL_CR_POWERON: 3940 case MSR_IA32_LASTBRANCHFROMIP: 3941 case MSR_IA32_LASTBRANCHTOIP: 3942 case MSR_IA32_LASTINTFROMIP: 3943 case MSR_IA32_LASTINTTOIP: 3944 case MSR_AMD64_SYSCFG: 3945 case MSR_K8_TSEG_ADDR: 3946 case MSR_K8_TSEG_MASK: 3947 case MSR_VM_HSAVE_PA: 3948 case MSR_K8_INT_PENDING_MSG: 3949 case MSR_AMD64_NB_CFG: 3950 case MSR_FAM10H_MMIO_CONF_BASE: 3951 case MSR_AMD64_BU_CFG2: 3952 case MSR_IA32_PERF_CTL: 3953 case MSR_AMD64_DC_CFG: 3954 case MSR_F15H_EX_CFG: 3955 /* 3956 * Intel Sandy Bridge CPUs must support the RAPL (running average power 3957 * limit) MSRs. Just return 0, as we do not want to expose the host 3958 * data here. Do not conditionalize this on CPUID, as KVM does not do 3959 * so for existing CPU-specific MSRs. 3960 */ 3961 case MSR_RAPL_POWER_UNIT: 3962 case MSR_PP0_ENERGY_STATUS: /* Power plane 0 (core) */ 3963 case MSR_PP1_ENERGY_STATUS: /* Power plane 1 (graphics uncore) */ 3964 case MSR_PKG_ENERGY_STATUS: /* Total package */ 3965 case MSR_DRAM_ENERGY_STATUS: /* DRAM controller */ 3966 msr_info->data = 0; 3967 break; 3968 case MSR_IA32_PEBS_ENABLE: 3969 case MSR_IA32_DS_AREA: 3970 case MSR_PEBS_DATA_CFG: 3971 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: 3972 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3973 return kvm_pmu_get_msr(vcpu, msr_info); 3974 /* 3975 * Userspace is allowed to read MSRs that KVM reports as 3976 * to-be-saved, even if an MSR isn't fully supported. 3977 */ 3978 if (!msr_info->host_initiated) 3979 return 1; 3980 msr_info->data = 0; 3981 break; 3982 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3983 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3984 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3985 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3986 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3987 return kvm_pmu_get_msr(vcpu, msr_info); 3988 msr_info->data = 0; 3989 break; 3990 case MSR_IA32_UCODE_REV: 3991 msr_info->data = vcpu->arch.microcode_version; 3992 break; 3993 case MSR_IA32_ARCH_CAPABILITIES: 3994 if (!msr_info->host_initiated && 3995 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) 3996 return 1; 3997 msr_info->data = vcpu->arch.arch_capabilities; 3998 break; 3999 case MSR_IA32_PERF_CAPABILITIES: 4000 if (!msr_info->host_initiated && 4001 !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) 4002 return 1; 4003 msr_info->data = vcpu->arch.perf_capabilities; 4004 break; 4005 case MSR_IA32_POWER_CTL: 4006 msr_info->data = vcpu->arch.msr_ia32_power_ctl; 4007 break; 4008 case MSR_IA32_TSC: { 4009 /* 4010 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset 4011 * even when not intercepted. AMD manual doesn't explicitly 4012 * state this but appears to behave the same. 4013 * 4014 * On userspace reads and writes, however, we unconditionally 4015 * return L1's TSC value to ensure backwards-compatible 4016 * behavior for migration. 
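	 *
	 * For example, if userspace reads the TSC for migration while L2 is
	 * active, returning the L2-adjusted value would fold the nested TSC
	 * offset into the saved state and skew L1's clock on the destination.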
4017 */ 4018 u64 offset, ratio; 4019 4020 if (msr_info->host_initiated) { 4021 offset = vcpu->arch.l1_tsc_offset; 4022 ratio = vcpu->arch.l1_tsc_scaling_ratio; 4023 } else { 4024 offset = vcpu->arch.tsc_offset; 4025 ratio = vcpu->arch.tsc_scaling_ratio; 4026 } 4027 4028 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset; 4029 break; 4030 } 4031 case MSR_MTRRcap: 4032 case 0x200 ... MSR_IA32_MC0_CTL2 - 1: 4033 case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff: 4034 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); 4035 case 0xcd: /* fsb frequency */ 4036 msr_info->data = 3; 4037 break; 4038 /* 4039 * MSR_EBC_FREQUENCY_ID 4040 * Conservative value valid for even the basic CPU models. 4041 * Models 0,1: 000 in bits 23:21 indicating a bus speed of 4042 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, 4043 * and 266MHz for model 3, or 4. Set Core Clock 4044 * Frequency to System Bus Frequency Ratio to 1 (bits 4045 * 31:24) even though these are only valid for CPU 4046 * models > 2, however guests may end up dividing or 4047 * multiplying by zero otherwise. 4048 */ 4049 case MSR_EBC_FREQUENCY_ID: 4050 msr_info->data = 1 << 24; 4051 break; 4052 case MSR_IA32_APICBASE: 4053 msr_info->data = kvm_get_apic_base(vcpu); 4054 break; 4055 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: 4056 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); 4057 case MSR_IA32_TSC_DEADLINE: 4058 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); 4059 break; 4060 case MSR_IA32_TSC_ADJUST: 4061 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; 4062 break; 4063 case MSR_IA32_MISC_ENABLE: 4064 msr_info->data = vcpu->arch.ia32_misc_enable_msr; 4065 break; 4066 case MSR_IA32_SMBASE: 4067 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) 4068 return 1; 4069 msr_info->data = vcpu->arch.smbase; 4070 break; 4071 case MSR_SMI_COUNT: 4072 msr_info->data = vcpu->arch.smi_count; 4073 break; 4074 case MSR_IA32_PERF_STATUS: 4075 /* TSC increment by tick */ 4076 msr_info->data = 1000ULL; 4077 /* CPU multiplier */ 4078 msr_info->data |= (((uint64_t)4ULL) << 40); 4079 break; 4080 case MSR_EFER: 4081 msr_info->data = vcpu->arch.efer; 4082 break; 4083 case MSR_KVM_WALL_CLOCK: 4084 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4085 return 1; 4086 4087 msr_info->data = vcpu->kvm->arch.wall_clock; 4088 break; 4089 case MSR_KVM_WALL_CLOCK_NEW: 4090 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4091 return 1; 4092 4093 msr_info->data = vcpu->kvm->arch.wall_clock; 4094 break; 4095 case MSR_KVM_SYSTEM_TIME: 4096 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4097 return 1; 4098 4099 msr_info->data = vcpu->arch.time; 4100 break; 4101 case MSR_KVM_SYSTEM_TIME_NEW: 4102 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4103 return 1; 4104 4105 msr_info->data = vcpu->arch.time; 4106 break; 4107 case MSR_KVM_ASYNC_PF_EN: 4108 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 4109 return 1; 4110 4111 msr_info->data = vcpu->arch.apf.msr_en_val; 4112 break; 4113 case MSR_KVM_ASYNC_PF_INT: 4114 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4115 return 1; 4116 4117 msr_info->data = vcpu->arch.apf.msr_int_val; 4118 break; 4119 case MSR_KVM_ASYNC_PF_ACK: 4120 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4121 return 1; 4122 4123 msr_info->data = 0; 4124 break; 4125 case MSR_KVM_STEAL_TIME: 4126 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 4127 return 1; 4128 4129 msr_info->data = vcpu->arch.st.msr_val; 4130 break; 4131 case MSR_KVM_PV_EOI_EN: 4132 if (!guest_pv_has(vcpu, 
KVM_FEATURE_PV_EOI)) 4133 return 1; 4134 4135 msr_info->data = vcpu->arch.pv_eoi.msr_val; 4136 break; 4137 case MSR_KVM_POLL_CONTROL: 4138 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 4139 return 1; 4140 4141 msr_info->data = vcpu->arch.msr_kvm_poll_control; 4142 break; 4143 case MSR_IA32_P5_MC_ADDR: 4144 case MSR_IA32_P5_MC_TYPE: 4145 case MSR_IA32_MCG_CAP: 4146 case MSR_IA32_MCG_CTL: 4147 case MSR_IA32_MCG_STATUS: 4148 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 4149 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 4150 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, 4151 msr_info->host_initiated); 4152 case MSR_IA32_XSS: 4153 if (!msr_info->host_initiated && 4154 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 4155 return 1; 4156 msr_info->data = vcpu->arch.ia32_xss; 4157 break; 4158 case MSR_K7_CLK_CTL: 4159 /* 4160 * Provide expected ramp-up count for K7. All others 4161 * are set to zero, indicating minimum divisors for 4162 * every field. 4163 * 4164 * This prevents guest kernels on an AMD host with CPU 4165 * type 6, model 8 and higher from exploding due to 4166 * the rdmsr failing. 4167 */ 4168 msr_info->data = 0x20000000; 4169 break; 4170 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 4171 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 4172 case HV_X64_MSR_SYNDBG_OPTIONS: 4173 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 4174 case HV_X64_MSR_CRASH_CTL: 4175 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 4176 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 4177 case HV_X64_MSR_TSC_EMULATION_CONTROL: 4178 case HV_X64_MSR_TSC_EMULATION_STATUS: 4179 return kvm_hv_get_msr_common(vcpu, 4180 msr_info->index, &msr_info->data, 4181 msr_info->host_initiated); 4182 case MSR_IA32_BBL_CR_CTL3: 4183 /* This legacy MSR exists but isn't fully documented in current 4184 * silicon. It is however accessed by winxp in very narrow 4185 * scenarios where it sets bit #19, itself documented as 4186 * a "reserved" bit.
Best effort attempt to source coherent 4187 * read data here should the balance of the register be 4188 * interpreted by the guest: 4189 * 4190 * L2 cache control register 3: 64GB range, 256KB size, 4191 * enabled, latency 0x1, configured 4192 */ 4193 msr_info->data = 0xbe702111; 4194 break; 4195 case MSR_AMD64_OSVW_ID_LENGTH: 4196 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4197 return 1; 4198 msr_info->data = vcpu->arch.osvw.length; 4199 break; 4200 case MSR_AMD64_OSVW_STATUS: 4201 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4202 return 1; 4203 msr_info->data = vcpu->arch.osvw.status; 4204 break; 4205 case MSR_PLATFORM_INFO: 4206 if (!msr_info->host_initiated && 4207 !vcpu->kvm->arch.guest_can_read_msr_platform_info) 4208 return 1; 4209 msr_info->data = vcpu->arch.msr_platform_info; 4210 break; 4211 case MSR_MISC_FEATURES_ENABLES: 4212 msr_info->data = vcpu->arch.msr_misc_features_enables; 4213 break; 4214 case MSR_K7_HWCR: 4215 msr_info->data = vcpu->arch.msr_hwcr; 4216 break; 4217 #ifdef CONFIG_X86_64 4218 case MSR_IA32_XFD: 4219 if (!msr_info->host_initiated && 4220 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4221 return 1; 4222 4223 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; 4224 break; 4225 case MSR_IA32_XFD_ERR: 4226 if (!msr_info->host_initiated && 4227 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4228 return 1; 4229 4230 msr_info->data = vcpu->arch.guest_fpu.xfd_err; 4231 break; 4232 #endif 4233 default: 4234 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 4235 return kvm_pmu_get_msr(vcpu, msr_info); 4236 return KVM_MSR_RET_INVALID; 4237 } 4238 return 0; 4239 } 4240 EXPORT_SYMBOL_GPL(kvm_get_msr_common); 4241 4242 /* 4243 * Read or write a bunch of msrs. All parameters are kernel addresses. 4244 * 4245 * @return number of msrs set successfully. 4246 */ 4247 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, 4248 struct kvm_msr_entry *entries, 4249 int (*do_msr)(struct kvm_vcpu *vcpu, 4250 unsigned index, u64 *data)) 4251 { 4252 int i; 4253 4254 for (i = 0; i < msrs->nmsrs; ++i) 4255 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 4256 break; 4257 4258 return i; 4259 } 4260 4261 /* 4262 * Read or write a bunch of msrs. Parameters are user addresses. 4263 * 4264 * @return number of msrs set successfully. 
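 *
 * A short count tells userspace which access failed: entries before the
 * returned index were processed, and the entry at that index is the one
 * that was rejected.
 *
 * A minimal userspace sketch (vcpu_fd is assumed to be an open vCPU fd,
 * error handling elided):
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } buf = {
 *		.hdr.nmsrs  = 1,
 *		.e[0].index = MSR_EFER,
 *	};
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &buf);	// n == 1 on success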
4265 */ 4266 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, 4267 int (*do_msr)(struct kvm_vcpu *vcpu, 4268 unsigned index, u64 *data), 4269 int writeback) 4270 { 4271 struct kvm_msrs msrs; 4272 struct kvm_msr_entry *entries; 4273 int r, n; 4274 unsigned size; 4275 4276 r = -EFAULT; 4277 if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) 4278 goto out; 4279 4280 r = -E2BIG; 4281 if (msrs.nmsrs >= MAX_IO_MSRS) 4282 goto out; 4283 4284 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; 4285 entries = memdup_user(user_msrs->entries, size); 4286 if (IS_ERR(entries)) { 4287 r = PTR_ERR(entries); 4288 goto out; 4289 } 4290 4291 r = n = __msr_io(vcpu, &msrs, entries, do_msr); 4292 if (r < 0) 4293 goto out_free; 4294 4295 r = -EFAULT; 4296 if (writeback && copy_to_user(user_msrs->entries, entries, size)) 4297 goto out_free; 4298 4299 r = n; 4300 4301 out_free: 4302 kfree(entries); 4303 out: 4304 return r; 4305 } 4306 4307 static inline bool kvm_can_mwait_in_guest(void) 4308 { 4309 return boot_cpu_has(X86_FEATURE_MWAIT) && 4310 !boot_cpu_has_bug(X86_BUG_MONITOR) && 4311 boot_cpu_has(X86_FEATURE_ARAT); 4312 } 4313 4314 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, 4315 struct kvm_cpuid2 __user *cpuid_arg) 4316 { 4317 struct kvm_cpuid2 cpuid; 4318 int r; 4319 4320 r = -EFAULT; 4321 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4322 return r; 4323 4324 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); 4325 if (r) 4326 return r; 4327 4328 r = -EFAULT; 4329 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 4330 return r; 4331 4332 return 0; 4333 } 4334 4335 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 4336 { 4337 int r = 0; 4338 4339 switch (ext) { 4340 case KVM_CAP_IRQCHIP: 4341 case KVM_CAP_HLT: 4342 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 4343 case KVM_CAP_SET_TSS_ADDR: 4344 case KVM_CAP_EXT_CPUID: 4345 case KVM_CAP_EXT_EMUL_CPUID: 4346 case KVM_CAP_CLOCKSOURCE: 4347 case KVM_CAP_PIT: 4348 case KVM_CAP_NOP_IO_DELAY: 4349 case KVM_CAP_MP_STATE: 4350 case KVM_CAP_SYNC_MMU: 4351 case KVM_CAP_USER_NMI: 4352 case KVM_CAP_REINJECT_CONTROL: 4353 case KVM_CAP_IRQ_INJECT_STATUS: 4354 case KVM_CAP_IOEVENTFD: 4355 case KVM_CAP_IOEVENTFD_NO_LENGTH: 4356 case KVM_CAP_PIT2: 4357 case KVM_CAP_PIT_STATE2: 4358 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 4359 case KVM_CAP_VCPU_EVENTS: 4360 case KVM_CAP_HYPERV: 4361 case KVM_CAP_HYPERV_VAPIC: 4362 case KVM_CAP_HYPERV_SPIN: 4363 case KVM_CAP_HYPERV_SYNIC: 4364 case KVM_CAP_HYPERV_SYNIC2: 4365 case KVM_CAP_HYPERV_VP_INDEX: 4366 case KVM_CAP_HYPERV_EVENTFD: 4367 case KVM_CAP_HYPERV_TLBFLUSH: 4368 case KVM_CAP_HYPERV_SEND_IPI: 4369 case KVM_CAP_HYPERV_CPUID: 4370 case KVM_CAP_HYPERV_ENFORCE_CPUID: 4371 case KVM_CAP_SYS_HYPERV_CPUID: 4372 case KVM_CAP_PCI_SEGMENT: 4373 case KVM_CAP_DEBUGREGS: 4374 case KVM_CAP_X86_ROBUST_SINGLESTEP: 4375 case KVM_CAP_XSAVE: 4376 case KVM_CAP_ASYNC_PF: 4377 case KVM_CAP_ASYNC_PF_INT: 4378 case KVM_CAP_GET_TSC_KHZ: 4379 case KVM_CAP_KVMCLOCK_CTRL: 4380 case KVM_CAP_READONLY_MEM: 4381 case KVM_CAP_HYPERV_TIME: 4382 case KVM_CAP_IOAPIC_POLARITY_IGNORED: 4383 case KVM_CAP_TSC_DEADLINE_TIMER: 4384 case KVM_CAP_DISABLE_QUIRKS: 4385 case KVM_CAP_SET_BOOT_CPU_ID: 4386 case KVM_CAP_SPLIT_IRQCHIP: 4387 case KVM_CAP_IMMEDIATE_EXIT: 4388 case KVM_CAP_PMU_EVENT_FILTER: 4389 case KVM_CAP_GET_MSR_FEATURES: 4390 case KVM_CAP_MSR_PLATFORM_INFO: 4391 case KVM_CAP_EXCEPTION_PAYLOAD: 4392 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 4393 case KVM_CAP_SET_GUEST_DEBUG: 4394 case KVM_CAP_LAST_CPU: 
4395 case KVM_CAP_X86_USER_SPACE_MSR: 4396 case KVM_CAP_X86_MSR_FILTER: 4397 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 4398 #ifdef CONFIG_X86_SGX_KVM 4399 case KVM_CAP_SGX_ATTRIBUTE: 4400 #endif 4401 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 4402 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 4403 case KVM_CAP_SREGS2: 4404 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 4405 case KVM_CAP_VCPU_ATTRIBUTES: 4406 case KVM_CAP_SYS_ATTRIBUTES: 4407 case KVM_CAP_VAPIC: 4408 case KVM_CAP_ENABLE_CAP: 4409 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 4410 r = 1; 4411 break; 4412 case KVM_CAP_EXIT_HYPERCALL: 4413 r = KVM_EXIT_HYPERCALL_VALID_MASK; 4414 break; 4415 case KVM_CAP_SET_GUEST_DEBUG2: 4416 return KVM_GUESTDBG_VALID_MASK; 4417 #ifdef CONFIG_KVM_XEN 4418 case KVM_CAP_XEN_HVM: 4419 r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | 4420 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | 4421 KVM_XEN_HVM_CONFIG_SHARED_INFO | 4422 KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL | 4423 KVM_XEN_HVM_CONFIG_EVTCHN_SEND; 4424 if (sched_info_on()) 4425 r |= KVM_XEN_HVM_CONFIG_RUNSTATE; 4426 break; 4427 #endif 4428 case KVM_CAP_SYNC_REGS: 4429 r = KVM_SYNC_X86_VALID_FIELDS; 4430 break; 4431 case KVM_CAP_ADJUST_CLOCK: 4432 r = KVM_CLOCK_VALID_FLAGS; 4433 break; 4434 case KVM_CAP_X86_DISABLE_EXITS: 4435 r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE | 4436 KVM_X86_DISABLE_EXITS_CSTATE; 4437 if (kvm_can_mwait_in_guest()) 4438 r |= KVM_X86_DISABLE_EXITS_MWAIT; 4439 break; 4440 case KVM_CAP_X86_SMM: 4441 if (!IS_ENABLED(CONFIG_KVM_SMM)) 4442 break; 4443 4444 /* SMBASE is usually relocated above 1M on modern chipsets, 4445 * and SMM handlers might indeed rely on 4G segment limits, 4446 * so do not report SMM to be available if real mode is 4447 * emulated via vm86 mode. Still, do not go to great lengths 4448 * to avoid userspace's usage of the feature, because it is a 4449 * fringe case that is not enabled except via specific settings 4450 * of the module parameters. 4451 */ 4452 r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); 4453 break; 4454 case KVM_CAP_NR_VCPUS: 4455 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); 4456 break; 4457 case KVM_CAP_MAX_VCPUS: 4458 r = KVM_MAX_VCPUS; 4459 break; 4460 case KVM_CAP_MAX_VCPU_ID: 4461 r = KVM_MAX_VCPU_IDS; 4462 break; 4463 case KVM_CAP_PV_MMU: /* obsolete */ 4464 r = 0; 4465 break; 4466 case KVM_CAP_MCE: 4467 r = KVM_MAX_MCE_BANKS; 4468 break; 4469 case KVM_CAP_XCRS: 4470 r = boot_cpu_has(X86_FEATURE_XSAVE); 4471 break; 4472 case KVM_CAP_TSC_CONTROL: 4473 case KVM_CAP_VM_TSC_CONTROL: 4474 r = kvm_caps.has_tsc_control; 4475 break; 4476 case KVM_CAP_X2APIC_API: 4477 r = KVM_X2APIC_API_VALID_FLAGS; 4478 break; 4479 case KVM_CAP_NESTED_STATE: 4480 r = kvm_x86_ops.nested_ops->get_state ?
4481 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; 4482 break; 4483 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 4484 r = kvm_x86_ops.enable_l2_tlb_flush != NULL; 4485 break; 4486 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 4487 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; 4488 break; 4489 case KVM_CAP_SMALLER_MAXPHYADDR: 4490 r = (int) allow_smaller_maxphyaddr; 4491 break; 4492 case KVM_CAP_STEAL_TIME: 4493 r = sched_info_on(); 4494 break; 4495 case KVM_CAP_X86_BUS_LOCK_EXIT: 4496 if (kvm_caps.has_bus_lock_exit) 4497 r = KVM_BUS_LOCK_DETECTION_OFF | 4498 KVM_BUS_LOCK_DETECTION_EXIT; 4499 else 4500 r = 0; 4501 break; 4502 case KVM_CAP_XSAVE2: { 4503 u64 guest_perm = xstate_get_guest_group_perm(); 4504 4505 r = xstate_required_size(kvm_caps.supported_xcr0 & guest_perm, false); 4506 if (r < sizeof(struct kvm_xsave)) 4507 r = sizeof(struct kvm_xsave); 4508 break; 4509 } 4510 case KVM_CAP_PMU_CAPABILITY: 4511 r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0; 4512 break; 4513 case KVM_CAP_DISABLE_QUIRKS2: 4514 r = KVM_X86_VALID_QUIRKS; 4515 break; 4516 case KVM_CAP_X86_NOTIFY_VMEXIT: 4517 r = kvm_caps.has_notify_vmexit; 4518 break; 4519 default: 4520 break; 4521 } 4522 return r; 4523 } 4524 4525 static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr) 4526 { 4527 void __user *uaddr = (void __user*)(unsigned long)attr->addr; 4528 4529 if ((u64)(unsigned long)uaddr != attr->addr) 4530 return ERR_PTR_USR(-EFAULT); 4531 return uaddr; 4532 } 4533 4534 static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr) 4535 { 4536 u64 __user *uaddr = kvm_get_attr_addr(attr); 4537 4538 if (attr->group) 4539 return -ENXIO; 4540 4541 if (IS_ERR(uaddr)) 4542 return PTR_ERR(uaddr); 4543 4544 switch (attr->attr) { 4545 case KVM_X86_XCOMP_GUEST_SUPP: 4546 if (put_user(kvm_caps.supported_xcr0, uaddr)) 4547 return -EFAULT; 4548 return 0; 4549 default: 4550 return -ENXIO; 4551 break; 4552 } 4553 } 4554 4555 static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr) 4556 { 4557 if (attr->group) 4558 return -ENXIO; 4559 4560 switch (attr->attr) { 4561 case KVM_X86_XCOMP_GUEST_SUPP: 4562 return 0; 4563 default: 4564 return -ENXIO; 4565 } 4566 } 4567 4568 long kvm_arch_dev_ioctl(struct file *filp, 4569 unsigned int ioctl, unsigned long arg) 4570 { 4571 void __user *argp = (void __user *)arg; 4572 long r; 4573 4574 switch (ioctl) { 4575 case KVM_GET_MSR_INDEX_LIST: { 4576 struct kvm_msr_list __user *user_msr_list = argp; 4577 struct kvm_msr_list msr_list; 4578 unsigned n; 4579 4580 r = -EFAULT; 4581 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4582 goto out; 4583 n = msr_list.nmsrs; 4584 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; 4585 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4586 goto out; 4587 r = -E2BIG; 4588 if (n < msr_list.nmsrs) 4589 goto out; 4590 r = -EFAULT; 4591 if (copy_to_user(user_msr_list->indices, &msrs_to_save, 4592 num_msrs_to_save * sizeof(u32))) 4593 goto out; 4594 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, 4595 &emulated_msrs, 4596 num_emulated_msrs * sizeof(u32))) 4597 goto out; 4598 r = 0; 4599 break; 4600 } 4601 case KVM_GET_SUPPORTED_CPUID: 4602 case KVM_GET_EMULATED_CPUID: { 4603 struct kvm_cpuid2 __user *cpuid_arg = argp; 4604 struct kvm_cpuid2 cpuid; 4605 4606 r = -EFAULT; 4607 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4608 goto out; 4609 4610 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, 4611 ioctl); 4612 if (r) 4613 goto out; 4614 4615 r = -EFAULT; 4616 if (copy_to_user(cpuid_arg, &cpuid, 
sizeof(cpuid))) 4617 goto out; 4618 r = 0; 4619 break; 4620 } 4621 case KVM_X86_GET_MCE_CAP_SUPPORTED: 4622 r = -EFAULT; 4623 if (copy_to_user(argp, &kvm_caps.supported_mce_cap, 4624 sizeof(kvm_caps.supported_mce_cap))) 4625 goto out; 4626 r = 0; 4627 break; 4628 case KVM_GET_MSR_FEATURE_INDEX_LIST: { 4629 struct kvm_msr_list __user *user_msr_list = argp; 4630 struct kvm_msr_list msr_list; 4631 unsigned int n; 4632 4633 r = -EFAULT; 4634 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4635 goto out; 4636 n = msr_list.nmsrs; 4637 msr_list.nmsrs = num_msr_based_features; 4638 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4639 goto out; 4640 r = -E2BIG; 4641 if (n < msr_list.nmsrs) 4642 goto out; 4643 r = -EFAULT; 4644 if (copy_to_user(user_msr_list->indices, &msr_based_features, 4645 num_msr_based_features * sizeof(u32))) 4646 goto out; 4647 r = 0; 4648 break; 4649 } 4650 case KVM_GET_MSRS: 4651 r = msr_io(NULL, argp, do_get_msr_feature, 1); 4652 break; 4653 case KVM_GET_SUPPORTED_HV_CPUID: 4654 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp); 4655 break; 4656 case KVM_GET_DEVICE_ATTR: { 4657 struct kvm_device_attr attr; 4658 r = -EFAULT; 4659 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4660 break; 4661 r = kvm_x86_dev_get_attr(&attr); 4662 break; 4663 } 4664 case KVM_HAS_DEVICE_ATTR: { 4665 struct kvm_device_attr attr; 4666 r = -EFAULT; 4667 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4668 break; 4669 r = kvm_x86_dev_has_attr(&attr); 4670 break; 4671 } 4672 default: 4673 r = -EINVAL; 4674 break; 4675 } 4676 out: 4677 return r; 4678 } 4679 4680 static void wbinvd_ipi(void *garbage) 4681 { 4682 wbinvd(); 4683 } 4684 4685 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) 4686 { 4687 return kvm_arch_has_noncoherent_dma(vcpu->kvm); 4688 } 4689 4690 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 4691 { 4692 /* Address WBINVD may be executed by guest */ 4693 if (need_emulate_wbinvd(vcpu)) { 4694 if (static_call(kvm_x86_has_wbinvd_exit)()) 4695 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 4696 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) 4697 smp_call_function_single(vcpu->cpu, 4698 wbinvd_ipi, NULL, 1); 4699 } 4700 4701 static_call(kvm_x86_vcpu_load)(vcpu, cpu); 4702 4703 /* Save host pkru register if supported */ 4704 vcpu->arch.host_pkru = read_pkru(); 4705 4706 /* Apply any externally detected TSC adjustments (due to suspend) */ 4707 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { 4708 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); 4709 vcpu->arch.tsc_offset_adjustment = 0; 4710 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 4711 } 4712 4713 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { 4714 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : 4715 rdtsc() - vcpu->arch.last_host_tsc; 4716 if (tsc_delta < 0) 4717 mark_tsc_unstable("KVM discovered backwards TSC"); 4718 4719 if (kvm_check_tsc_unstable()) { 4720 u64 offset = kvm_compute_l1_tsc_offset(vcpu, 4721 vcpu->arch.last_guest_tsc); 4722 kvm_vcpu_write_tsc_offset(vcpu, offset); 4723 vcpu->arch.tsc_catchup = 1; 4724 } 4725 4726 if (kvm_lapic_hv_timer_in_use(vcpu)) 4727 kvm_lapic_restart_hv_timer(vcpu); 4728 4729 /* 4730 * On a host with synchronized TSC, there is no need to update 4731 * kvmclock on vcpu->cpu migration 4732 */ 4733 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) 4734 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 4735 if (vcpu->cpu != cpu) 4736 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); 4737 vcpu->cpu = cpu; 4738 } 4739 4740 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 4741 } 4742 4743 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) 4744 { 4745 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 4746 struct kvm_steal_time __user *st; 4747 struct kvm_memslots *slots; 4748 static const u8 preempted = KVM_VCPU_PREEMPTED; 4749 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; 4750 4751 /* 4752 * The vCPU can be marked preempted if and only if the VM-Exit was on 4753 * an instruction boundary and will not trigger guest emulation of any 4754 * kind (see vcpu_run). Vendor specific code controls (conservatively) 4755 * when this is true, for example allowing the vCPU to be marked 4756 * preempted if and only if the VM-Exit was due to a host interrupt. 4757 */ 4758 if (!vcpu->arch.at_instruction_boundary) { 4759 vcpu->stat.preemption_other++; 4760 return; 4761 } 4762 4763 vcpu->stat.preemption_reported++; 4764 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 4765 return; 4766 4767 if (vcpu->arch.st.preempted) 4768 return; 4769 4770 /* This happens on process exit */ 4771 if (unlikely(current->mm != vcpu->kvm->mm)) 4772 return; 4773 4774 slots = kvm_memslots(vcpu->kvm); 4775 4776 if (unlikely(slots->generation != ghc->generation || 4777 gpa != ghc->gpa || 4778 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) 4779 return; 4780 4781 st = (struct kvm_steal_time __user *)ghc->hva; 4782 BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted)); 4783 4784 if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted))) 4785 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; 4786 4787 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 4788 } 4789 4790 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 4791 { 4792 int idx; 4793 4794 if (vcpu->preempted) { 4795 if (!vcpu->arch.guest_state_protected) 4796 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); 4797 4798 /* 4799 * Take the srcu lock as memslots will be accessed to check the gfn 4800 * cache generation against the memslots generation. 
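		 * Writing the preempted flag through the cached mapping is only
		 * safe while SRCU keeps those memslots alive.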
4801 */ 4802 idx = srcu_read_lock(&vcpu->kvm->srcu); 4803 if (kvm_xen_msr_enabled(vcpu->kvm)) 4804 kvm_xen_runstate_set_preempted(vcpu); 4805 else 4806 kvm_steal_time_set_preempted(vcpu); 4807 srcu_read_unlock(&vcpu->kvm->srcu, idx); 4808 } 4809 4810 static_call(kvm_x86_vcpu_put)(vcpu); 4811 vcpu->arch.last_host_tsc = rdtsc(); 4812 } 4813 4814 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 4815 struct kvm_lapic_state *s) 4816 { 4817 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 4818 4819 return kvm_apic_get_state(vcpu, s); 4820 } 4821 4822 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, 4823 struct kvm_lapic_state *s) 4824 { 4825 int r; 4826 4827 r = kvm_apic_set_state(vcpu, s); 4828 if (r) 4829 return r; 4830 update_cr8_intercept(vcpu); 4831 4832 return 0; 4833 } 4834 4835 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) 4836 { 4837 /* 4838 * We can accept userspace's request for interrupt injection 4839 * as long as we have a place to store the interrupt number. 4840 * The actual injection will happen when the CPU is able to 4841 * deliver the interrupt. 4842 */ 4843 if (kvm_cpu_has_extint(vcpu)) 4844 return false; 4845 4846 /* Acknowledging ExtINT does not happen if LINT0 is masked. */ 4847 return (!lapic_in_kernel(vcpu) || 4848 kvm_apic_accept_pic_intr(vcpu)); 4849 } 4850 4851 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) 4852 { 4853 /* 4854 * Do not cause an interrupt window exit if an exception 4855 * is pending or an event needs reinjection; userspace 4856 * might want to inject the interrupt manually using KVM_SET_REGS 4857 * or KVM_SET_SREGS. For that to work, we must be at an 4858 * instruction boundary and with no events half-injected. 4859 */ 4860 return (kvm_arch_interrupt_allowed(vcpu) && 4861 kvm_cpu_accept_dm_intr(vcpu) && 4862 !kvm_event_needs_reinjection(vcpu) && 4863 !kvm_is_exception_pending(vcpu)); 4864 } 4865 4866 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 4867 struct kvm_interrupt *irq) 4868 { 4869 if (irq->irq >= KVM_NR_INTERRUPTS) 4870 return -EINVAL; 4871 4872 if (!irqchip_in_kernel(vcpu->kvm)) { 4873 kvm_queue_interrupt(vcpu, irq->irq, false); 4874 kvm_make_request(KVM_REQ_EVENT, vcpu); 4875 return 0; 4876 } 4877 4878 /* 4879 * With in-kernel LAPIC, we only use this to inject EXTINT, so 4880 * fail for in-kernel 8259. 
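	 * The emulated 8259 supplies its own vectors, so accepting one from
	 * userspace here could race with the in-kernel PIC.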
4881 */ 4882 if (pic_in_kernel(vcpu->kvm)) 4883 return -ENXIO; 4884 4885 if (vcpu->arch.pending_external_vector != -1) 4886 return -EEXIST; 4887 4888 vcpu->arch.pending_external_vector = irq->irq; 4889 kvm_make_request(KVM_REQ_EVENT, vcpu); 4890 return 0; 4891 } 4892 4893 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) 4894 { 4895 kvm_inject_nmi(vcpu); 4896 4897 return 0; 4898 } 4899 4900 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, 4901 struct kvm_tpr_access_ctl *tac) 4902 { 4903 if (tac->flags) 4904 return -EINVAL; 4905 vcpu->arch.tpr_access_reporting = !!tac->enabled; 4906 return 0; 4907 } 4908 4909 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, 4910 u64 mcg_cap) 4911 { 4912 int r; 4913 unsigned bank_num = mcg_cap & 0xff, bank; 4914 4915 r = -EINVAL; 4916 if (!bank_num || bank_num > KVM_MAX_MCE_BANKS) 4917 goto out; 4918 if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000)) 4919 goto out; 4920 r = 0; 4921 vcpu->arch.mcg_cap = mcg_cap; 4922 /* Init IA32_MCG_CTL to all 1s */ 4923 if (mcg_cap & MCG_CTL_P) 4924 vcpu->arch.mcg_ctl = ~(u64)0; 4925 /* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */ 4926 for (bank = 0; bank < bank_num; bank++) { 4927 vcpu->arch.mce_banks[bank*4] = ~(u64)0; 4928 if (mcg_cap & MCG_CMCI_P) 4929 vcpu->arch.mci_ctl2_banks[bank] = 0; 4930 } 4931 4932 kvm_apic_after_set_mcg_cap(vcpu); 4933 4934 static_call(kvm_x86_setup_mce)(vcpu); 4935 out: 4936 return r; 4937 } 4938 4939 /* 4940 * Validate this is an UCNA (uncorrectable no action) error by checking the 4941 * MCG_STATUS and MCi_STATUS registers: 4942 * - none of the bits for Machine Check Exceptions are set 4943 * - both the VAL (valid) and UC (uncorrectable) bits are set 4944 * MCI_STATUS_PCC - Processor Context Corrupted 4945 * MCI_STATUS_S - Signaled as a Machine Check Exception 4946 * MCI_STATUS_AR - Software recoverable Action Required 4947 */ 4948 static bool is_ucna(struct kvm_x86_mce *mce) 4949 { 4950 return !mce->mcg_status && 4951 !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) && 4952 (mce->status & MCI_STATUS_VAL) && 4953 (mce->status & MCI_STATUS_UC); 4954 } 4955 4956 static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks) 4957 { 4958 u64 mcg_cap = vcpu->arch.mcg_cap; 4959 4960 banks[1] = mce->status; 4961 banks[2] = mce->addr; 4962 banks[3] = mce->misc; 4963 vcpu->arch.mcg_status = mce->mcg_status; 4964 4965 if (!(mcg_cap & MCG_CMCI_P) || 4966 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN)) 4967 return 0; 4968 4969 if (lapic_in_kernel(vcpu)) 4970 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI); 4971 4972 return 0; 4973 } 4974 4975 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, 4976 struct kvm_x86_mce *mce) 4977 { 4978 u64 mcg_cap = vcpu->arch.mcg_cap; 4979 unsigned bank_num = mcg_cap & 0xff; 4980 u64 *banks = vcpu->arch.mce_banks; 4981 4982 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) 4983 return -EINVAL; 4984 4985 banks += array_index_nospec(4 * mce->bank, 4 * bank_num); 4986 4987 if (is_ucna(mce)) 4988 return kvm_vcpu_x86_set_ucna(vcpu, mce, banks); 4989 4990 /* 4991 * if IA32_MCG_CTL is not all 1s, the uncorrected error 4992 * reporting is disabled 4993 */ 4994 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && 4995 vcpu->arch.mcg_ctl != ~(u64)0) 4996 return 0; 4997 /* 4998 * if IA32_MCi_CTL is not all 1s, the uncorrected error 4999 * reporting is disabled for the bank 5000 */ 5001 if ((mce->status & MCI_STATUS_UC) && banks[0] != 
~(u64)0) 5002 return 0; 5003 if (mce->status & MCI_STATUS_UC) { 5004 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || 5005 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { 5006 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5007 return 0; 5008 } 5009 if (banks[1] & MCI_STATUS_VAL) 5010 mce->status |= MCI_STATUS_OVER; 5011 banks[2] = mce->addr; 5012 banks[3] = mce->misc; 5013 vcpu->arch.mcg_status = mce->mcg_status; 5014 banks[1] = mce->status; 5015 kvm_queue_exception(vcpu, MC_VECTOR); 5016 } else if (!(banks[1] & MCI_STATUS_VAL) 5017 || !(banks[1] & MCI_STATUS_UC)) { 5018 if (banks[1] & MCI_STATUS_VAL) 5019 mce->status |= MCI_STATUS_OVER; 5020 banks[2] = mce->addr; 5021 banks[3] = mce->misc; 5022 banks[1] = mce->status; 5023 } else 5024 banks[1] |= MCI_STATUS_OVER; 5025 return 0; 5026 } 5027 5028 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, 5029 struct kvm_vcpu_events *events) 5030 { 5031 struct kvm_queued_exception *ex; 5032 5033 process_nmi(vcpu); 5034 5035 #ifdef CONFIG_KVM_SMM 5036 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 5037 process_smi(vcpu); 5038 #endif 5039 5040 /* 5041 * KVM's ABI only allows for one exception to be migrated. Luckily, 5042 * the only time there can be two queued exceptions is if there's a 5043 * non-exiting _injected_ exception, and a pending exiting exception. 5044 * In that case, ignore the VM-Exiting exception as it's an extension 5045 * of the injected exception. 5046 */ 5047 if (vcpu->arch.exception_vmexit.pending && 5048 !vcpu->arch.exception.pending && 5049 !vcpu->arch.exception.injected) 5050 ex = &vcpu->arch.exception_vmexit; 5051 else 5052 ex = &vcpu->arch.exception; 5053 5054 /* 5055 * In guest mode, payload delivery should be deferred if the exception 5056 * will be intercepted by L1, e.g. KVM should not modify CR2 if L1 5057 * intercepts #PF, ditto for DR6 and #DBs. If the per-VM capability, 5058 * KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not 5059 * propagate the payload and so it cannot be safely deferred. Deliver 5060 * the payload if the capability hasn't been requested. 5061 */ 5062 if (!vcpu->kvm->arch.exception_payload_enabled && 5063 ex->pending && ex->has_payload) 5064 kvm_deliver_exception_payload(vcpu, ex); 5065 5066 memset(events, 0, sizeof(*events)); 5067 5068 /* 5069 * The API doesn't provide the instruction length for software 5070 * exceptions, so don't report them. As long as the guest RIP 5071 * isn't advanced, we should expect to encounter the exception 5072 * again. 5073 */ 5074 if (!kvm_exception_is_soft(ex->vector)) { 5075 events->exception.injected = ex->injected; 5076 events->exception.pending = ex->pending; 5077 /* 5078 * For ABI compatibility, deliberately conflate 5079 * pending and injected exceptions when 5080 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
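		 * Old userspace only understands the injected flag; folding a
		 * pending exception into it keeps migration to such userspace
		 * working, at the cost of losing the pending/injected
		 * distinction.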
5081 */ 5082 if (!vcpu->kvm->arch.exception_payload_enabled) 5083 events->exception.injected |= ex->pending; 5084 } 5085 events->exception.nr = ex->vector; 5086 events->exception.has_error_code = ex->has_error_code; 5087 events->exception.error_code = ex->error_code; 5088 events->exception_has_payload = ex->has_payload; 5089 events->exception_payload = ex->payload; 5090 5091 events->interrupt.injected = 5092 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; 5093 events->interrupt.nr = vcpu->arch.interrupt.nr; 5094 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 5095 5096 events->nmi.injected = vcpu->arch.nmi_injected; 5097 events->nmi.pending = vcpu->arch.nmi_pending != 0; 5098 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); 5099 5100 /* events->sipi_vector is never valid when reporting to user space */ 5101 5102 #ifdef CONFIG_KVM_SMM 5103 events->smi.smm = is_smm(vcpu); 5104 events->smi.pending = vcpu->arch.smi_pending; 5105 events->smi.smm_inside_nmi = 5106 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); 5107 #endif 5108 events->smi.latched_init = kvm_lapic_latched_init(vcpu); 5109 5110 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 5111 | KVM_VCPUEVENT_VALID_SHADOW 5112 | KVM_VCPUEVENT_VALID_SMM); 5113 if (vcpu->kvm->arch.exception_payload_enabled) 5114 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; 5115 if (vcpu->kvm->arch.triple_fault_event) { 5116 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5117 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; 5118 } 5119 } 5120 5121 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 5122 struct kvm_vcpu_events *events) 5123 { 5124 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING 5125 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 5126 | KVM_VCPUEVENT_VALID_SHADOW 5127 | KVM_VCPUEVENT_VALID_SMM 5128 | KVM_VCPUEVENT_VALID_PAYLOAD 5129 | KVM_VCPUEVENT_VALID_TRIPLE_FAULT)) 5130 return -EINVAL; 5131 5132 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { 5133 if (!vcpu->kvm->arch.exception_payload_enabled) 5134 return -EINVAL; 5135 if (events->exception.pending) 5136 events->exception.injected = 0; 5137 else 5138 events->exception_has_payload = 0; 5139 } else { 5140 events->exception.pending = 0; 5141 events->exception_has_payload = 0; 5142 } 5143 5144 if ((events->exception.injected || events->exception.pending) && 5145 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) 5146 return -EINVAL; 5147 5148 /* INITs are latched while in SMM */ 5149 if (events->flags & KVM_VCPUEVENT_VALID_SMM && 5150 (events->smi.smm || events->smi.pending) && 5151 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 5152 return -EINVAL; 5153 5154 process_nmi(vcpu); 5155 5156 /* 5157 * Flag that userspace is stuffing an exception, the next KVM_RUN will 5158 * morph the exception to a VM-Exit if appropriate. Do this only for 5159 * pending exceptions, already-injected exceptions are not subject to 5160 * interception. Note, userspace that conflates pending and injected 5161 * is hosed, and will incorrectly convert an injected exception into a 5162 * pending exception, which in turn may cause a spurious VM-Exit.
5163 */ 5164 vcpu->arch.exception_from_userspace = events->exception.pending; 5165 5166 vcpu->arch.exception_vmexit.pending = false; 5167 5168 vcpu->arch.exception.injected = events->exception.injected; 5169 vcpu->arch.exception.pending = events->exception.pending; 5170 vcpu->arch.exception.vector = events->exception.nr; 5171 vcpu->arch.exception.has_error_code = events->exception.has_error_code; 5172 vcpu->arch.exception.error_code = events->exception.error_code; 5173 vcpu->arch.exception.has_payload = events->exception_has_payload; 5174 vcpu->arch.exception.payload = events->exception_payload; 5175 5176 vcpu->arch.interrupt.injected = events->interrupt.injected; 5177 vcpu->arch.interrupt.nr = events->interrupt.nr; 5178 vcpu->arch.interrupt.soft = events->interrupt.soft; 5179 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) 5180 static_call(kvm_x86_set_interrupt_shadow)(vcpu, 5181 events->interrupt.shadow); 5182 5183 vcpu->arch.nmi_injected = events->nmi.injected; 5184 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) 5185 vcpu->arch.nmi_pending = events->nmi.pending; 5186 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); 5187 5188 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && 5189 lapic_in_kernel(vcpu)) 5190 vcpu->arch.apic->sipi_vector = events->sipi_vector; 5191 5192 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { 5193 #ifdef CONFIG_KVM_SMM 5194 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { 5195 kvm_leave_nested(vcpu); 5196 kvm_smm_changed(vcpu, events->smi.smm); 5197 } 5198 5199 vcpu->arch.smi_pending = events->smi.pending; 5200 5201 if (events->smi.smm) { 5202 if (events->smi.smm_inside_nmi) 5203 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 5204 else 5205 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; 5206 } 5207 5208 #else 5209 if (events->smi.smm || events->smi.pending || 5210 events->smi.smm_inside_nmi) 5211 return -EINVAL; 5212 #endif 5213 5214 if (lapic_in_kernel(vcpu)) { 5215 if (events->smi.latched_init) 5216 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5217 else 5218 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5219 } 5220 } 5221 5222 if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) { 5223 if (!vcpu->kvm->arch.triple_fault_event) 5224 return -EINVAL; 5225 if (events->triple_fault.pending) 5226 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5227 else 5228 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5229 } 5230 5231 kvm_make_request(KVM_REQ_EVENT, vcpu); 5232 5233 return 0; 5234 } 5235 5236 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, 5237 struct kvm_debugregs *dbgregs) 5238 { 5239 unsigned long val; 5240 5241 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); 5242 kvm_get_dr(vcpu, 6, &val); 5243 dbgregs->dr6 = val; 5244 dbgregs->dr7 = vcpu->arch.dr7; 5245 dbgregs->flags = 0; 5246 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); 5247 } 5248 5249 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 5250 struct kvm_debugregs *dbgregs) 5251 { 5252 if (dbgregs->flags) 5253 return -EINVAL; 5254 5255 if (!kvm_dr6_valid(dbgregs->dr6)) 5256 return -EINVAL; 5257 if (!kvm_dr7_valid(dbgregs->dr7)) 5258 return -EINVAL; 5259 5260 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); 5261 kvm_update_dr0123(vcpu); 5262 vcpu->arch.dr6 = dbgregs->dr6; 5263 vcpu->arch.dr7 = dbgregs->dr7; 5264 kvm_update_dr7(vcpu); 5265 5266 return 0; 5267 } 5268 5269 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, 5270 struct kvm_xsave *guest_xsave) 5271 { 5272 if 
(fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5273 return; 5274 5275 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5276 guest_xsave->region, 5277 sizeof(guest_xsave->region), 5278 vcpu->arch.pkru); 5279 } 5280 5281 static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu, 5282 u8 *state, unsigned int size) 5283 { 5284 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5285 return; 5286 5287 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5288 state, size, vcpu->arch.pkru); 5289 } 5290 5291 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 5292 struct kvm_xsave *guest_xsave) 5293 { 5294 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5295 return 0; 5296 5297 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, 5298 guest_xsave->region, 5299 kvm_caps.supported_xcr0, 5300 &vcpu->arch.pkru); 5301 } 5302 5303 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, 5304 struct kvm_xcrs *guest_xcrs) 5305 { 5306 if (!boot_cpu_has(X86_FEATURE_XSAVE)) { 5307 guest_xcrs->nr_xcrs = 0; 5308 return; 5309 } 5310 5311 guest_xcrs->nr_xcrs = 1; 5312 guest_xcrs->flags = 0; 5313 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; 5314 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; 5315 } 5316 5317 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, 5318 struct kvm_xcrs *guest_xcrs) 5319 { 5320 int i, r = 0; 5321 5322 if (!boot_cpu_has(X86_FEATURE_XSAVE)) 5323 return -EINVAL; 5324 5325 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) 5326 return -EINVAL; 5327 5328 for (i = 0; i < guest_xcrs->nr_xcrs; i++) 5329 /* Only support XCR0 currently */ 5330 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { 5331 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, 5332 guest_xcrs->xcrs[i].value); 5333 break; 5334 } 5335 if (r) 5336 r = -EINVAL; 5337 return r; 5338 } 5339 5340 /* 5341 * kvm_set_guest_paused() indicates to the guest kernel that it has been 5342 * stopped by the hypervisor. This function will be called from the host only. 5343 * EINVAL is returned when the host attempts to set the flag for a guest that 5344 * does not support pv clocks. 
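 *
 * The queued request makes the next clock update set PVCLOCK_GUEST_STOPPED
 * in the guest's pvclock flags, which the guest's watchdog code checks to
 * suppress spurious soft-lockup reports after a long pause.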
5345 */ 5346 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 5347 { 5348 if (!vcpu->arch.pv_time.active) 5349 return -EINVAL; 5350 vcpu->arch.pvclock_set_guest_stopped_request = true; 5351 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 5352 return 0; 5353 } 5354 5355 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu, 5356 struct kvm_device_attr *attr) 5357 { 5358 int r; 5359 5360 switch (attr->attr) { 5361 case KVM_VCPU_TSC_OFFSET: 5362 r = 0; 5363 break; 5364 default: 5365 r = -ENXIO; 5366 } 5367 5368 return r; 5369 } 5370 5371 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu, 5372 struct kvm_device_attr *attr) 5373 { 5374 u64 __user *uaddr = kvm_get_attr_addr(attr); 5375 int r; 5376 5377 if (IS_ERR(uaddr)) 5378 return PTR_ERR(uaddr); 5379 5380 switch (attr->attr) { 5381 case KVM_VCPU_TSC_OFFSET: 5382 r = -EFAULT; 5383 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) 5384 break; 5385 r = 0; 5386 break; 5387 default: 5388 r = -ENXIO; 5389 } 5390 5391 return r; 5392 } 5393 5394 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, 5395 struct kvm_device_attr *attr) 5396 { 5397 u64 __user *uaddr = kvm_get_attr_addr(attr); 5398 struct kvm *kvm = vcpu->kvm; 5399 int r; 5400 5401 if (IS_ERR(uaddr)) 5402 return PTR_ERR(uaddr); 5403 5404 switch (attr->attr) { 5405 case KVM_VCPU_TSC_OFFSET: { 5406 u64 offset, tsc, ns; 5407 unsigned long flags; 5408 bool matched; 5409 5410 r = -EFAULT; 5411 if (get_user(offset, uaddr)) 5412 break; 5413 5414 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 5415 5416 matched = (vcpu->arch.virtual_tsc_khz && 5417 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && 5418 kvm->arch.last_tsc_offset == offset); 5419 5420 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; 5421 ns = get_kvmclock_base_ns(); 5422 5423 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched); 5424 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 5425 5426 r = 0; 5427 break; 5428 } 5429 default: 5430 r = -ENXIO; 5431 } 5432 5433 return r; 5434 } 5435 5436 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu, 5437 unsigned int ioctl, 5438 void __user *argp) 5439 { 5440 struct kvm_device_attr attr; 5441 int r; 5442 5443 if (copy_from_user(&attr, argp, sizeof(attr))) 5444 return -EFAULT; 5445 5446 if (attr.group != KVM_VCPU_TSC_CTRL) 5447 return -ENXIO; 5448 5449 switch (ioctl) { 5450 case KVM_HAS_DEVICE_ATTR: 5451 r = kvm_arch_tsc_has_attr(vcpu, &attr); 5452 break; 5453 case KVM_GET_DEVICE_ATTR: 5454 r = kvm_arch_tsc_get_attr(vcpu, &attr); 5455 break; 5456 case KVM_SET_DEVICE_ATTR: 5457 r = kvm_arch_tsc_set_attr(vcpu, &attr); 5458 break; 5459 } 5460 5461 return r; 5462 } 5463 5464 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 5465 struct kvm_enable_cap *cap) 5466 { 5467 int r; 5468 uint16_t vmcs_version; 5469 void __user *user_ptr; 5470 5471 if (cap->flags) 5472 return -EINVAL; 5473 5474 switch (cap->cap) { 5475 case KVM_CAP_HYPERV_SYNIC2: 5476 if (cap->args[0]) 5477 return -EINVAL; 5478 fallthrough; 5479 5480 case KVM_CAP_HYPERV_SYNIC: 5481 if (!irqchip_in_kernel(vcpu->kvm)) 5482 return -EINVAL; 5483 return kvm_hv_activate_synic(vcpu, cap->cap == 5484 KVM_CAP_HYPERV_SYNIC2); 5485 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 5486 if (!kvm_x86_ops.nested_ops->enable_evmcs) 5487 return -ENOTTY; 5488 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); 5489 if (!r) { 5490 user_ptr = (void __user *)(uintptr_t)cap->args[0]; 5491 if (copy_to_user(user_ptr, &vmcs_version, 5492 sizeof(vmcs_version))) 5493 r = -EFAULT; 5494 
} 5495 return r; 5496 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 5497 if (!kvm_x86_ops.enable_l2_tlb_flush) 5498 return -ENOTTY; 5499 5500 return static_call(kvm_x86_enable_l2_tlb_flush)(vcpu); 5501 5502 case KVM_CAP_HYPERV_ENFORCE_CPUID: 5503 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); 5504 5505 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 5506 vcpu->arch.pv_cpuid.enforce = cap->args[0]; 5507 if (vcpu->arch.pv_cpuid.enforce) 5508 kvm_update_pv_runtime(vcpu); 5509 5510 return 0; 5511 default: 5512 return -EINVAL; 5513 } 5514 } 5515 5516 long kvm_arch_vcpu_ioctl(struct file *filp, 5517 unsigned int ioctl, unsigned long arg) 5518 { 5519 struct kvm_vcpu *vcpu = filp->private_data; 5520 void __user *argp = (void __user *)arg; 5521 int r; 5522 union { 5523 struct kvm_sregs2 *sregs2; 5524 struct kvm_lapic_state *lapic; 5525 struct kvm_xsave *xsave; 5526 struct kvm_xcrs *xcrs; 5527 void *buffer; 5528 } u; 5529 5530 vcpu_load(vcpu); 5531 5532 u.buffer = NULL; 5533 switch (ioctl) { 5534 case KVM_GET_LAPIC: { 5535 r = -EINVAL; 5536 if (!lapic_in_kernel(vcpu)) 5537 goto out; 5538 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), 5539 GFP_KERNEL_ACCOUNT); 5540 5541 r = -ENOMEM; 5542 if (!u.lapic) 5543 goto out; 5544 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); 5545 if (r) 5546 goto out; 5547 r = -EFAULT; 5548 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) 5549 goto out; 5550 r = 0; 5551 break; 5552 } 5553 case KVM_SET_LAPIC: { 5554 r = -EINVAL; 5555 if (!lapic_in_kernel(vcpu)) 5556 goto out; 5557 u.lapic = memdup_user(argp, sizeof(*u.lapic)); 5558 if (IS_ERR(u.lapic)) { 5559 r = PTR_ERR(u.lapic); 5560 goto out_nofree; 5561 } 5562 5563 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); 5564 break; 5565 } 5566 case KVM_INTERRUPT: { 5567 struct kvm_interrupt irq; 5568 5569 r = -EFAULT; 5570 if (copy_from_user(&irq, argp, sizeof(irq))) 5571 goto out; 5572 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 5573 break; 5574 } 5575 case KVM_NMI: { 5576 r = kvm_vcpu_ioctl_nmi(vcpu); 5577 break; 5578 } 5579 case KVM_SMI: { 5580 r = kvm_inject_smi(vcpu); 5581 break; 5582 } 5583 case KVM_SET_CPUID: { 5584 struct kvm_cpuid __user *cpuid_arg = argp; 5585 struct kvm_cpuid cpuid; 5586 5587 r = -EFAULT; 5588 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5589 goto out; 5590 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); 5591 break; 5592 } 5593 case KVM_SET_CPUID2: { 5594 struct kvm_cpuid2 __user *cpuid_arg = argp; 5595 struct kvm_cpuid2 cpuid; 5596 5597 r = -EFAULT; 5598 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5599 goto out; 5600 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 5601 cpuid_arg->entries); 5602 break; 5603 } 5604 case KVM_GET_CPUID2: { 5605 struct kvm_cpuid2 __user *cpuid_arg = argp; 5606 struct kvm_cpuid2 cpuid; 5607 5608 r = -EFAULT; 5609 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5610 goto out; 5611 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 5612 cpuid_arg->entries); 5613 if (r) 5614 goto out; 5615 r = -EFAULT; 5616 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 5617 goto out; 5618 r = 0; 5619 break; 5620 } 5621 case KVM_GET_MSRS: { 5622 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5623 r = msr_io(vcpu, argp, do_get_msr, 1); 5624 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5625 break; 5626 } 5627 case KVM_SET_MSRS: { 5628 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5629 r = msr_io(vcpu, argp, do_set_msr, 0); 5630 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5631 break; 5632 } 5633 case KVM_TPR_ACCESS_REPORTING: { 5634 struct kvm_tpr_access_ctl tac; 5635 5636 
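		/*
		 * Toggles reporting of guest TPR accesses to userspace via
		 * KVM_EXIT_TPR_ACCESS; see vcpu_ioctl_tpr_access_reporting().
		 */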
r = -EFAULT; 5637 if (copy_from_user(&tac, argp, sizeof(tac))) 5638 goto out; 5639 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 5640 if (r) 5641 goto out; 5642 r = -EFAULT; 5643 if (copy_to_user(argp, &tac, sizeof(tac))) 5644 goto out; 5645 r = 0; 5646 break; 5647 }; 5648 case KVM_SET_VAPIC_ADDR: { 5649 struct kvm_vapic_addr va; 5650 int idx; 5651 5652 r = -EINVAL; 5653 if (!lapic_in_kernel(vcpu)) 5654 goto out; 5655 r = -EFAULT; 5656 if (copy_from_user(&va, argp, sizeof(va))) 5657 goto out; 5658 idx = srcu_read_lock(&vcpu->kvm->srcu); 5659 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 5660 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5661 break; 5662 } 5663 case KVM_X86_SETUP_MCE: { 5664 u64 mcg_cap; 5665 5666 r = -EFAULT; 5667 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap))) 5668 goto out; 5669 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 5670 break; 5671 } 5672 case KVM_X86_SET_MCE: { 5673 struct kvm_x86_mce mce; 5674 5675 r = -EFAULT; 5676 if (copy_from_user(&mce, argp, sizeof(mce))) 5677 goto out; 5678 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 5679 break; 5680 } 5681 case KVM_GET_VCPU_EVENTS: { 5682 struct kvm_vcpu_events events; 5683 5684 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); 5685 5686 r = -EFAULT; 5687 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) 5688 break; 5689 r = 0; 5690 break; 5691 } 5692 case KVM_SET_VCPU_EVENTS: { 5693 struct kvm_vcpu_events events; 5694 5695 r = -EFAULT; 5696 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) 5697 break; 5698 5699 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); 5700 break; 5701 } 5702 case KVM_GET_DEBUGREGS: { 5703 struct kvm_debugregs dbgregs; 5704 5705 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); 5706 5707 r = -EFAULT; 5708 if (copy_to_user(argp, &dbgregs, 5709 sizeof(struct kvm_debugregs))) 5710 break; 5711 r = 0; 5712 break; 5713 } 5714 case KVM_SET_DEBUGREGS: { 5715 struct kvm_debugregs dbgregs; 5716 5717 r = -EFAULT; 5718 if (copy_from_user(&dbgregs, argp, 5719 sizeof(struct kvm_debugregs))) 5720 break; 5721 5722 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); 5723 break; 5724 } 5725 case KVM_GET_XSAVE: { 5726 r = -EINVAL; 5727 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) 5728 break; 5729 5730 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT); 5731 r = -ENOMEM; 5732 if (!u.xsave) 5733 break; 5734 5735 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); 5736 5737 r = -EFAULT; 5738 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) 5739 break; 5740 r = 0; 5741 break; 5742 } 5743 case KVM_SET_XSAVE: { 5744 int size = vcpu->arch.guest_fpu.uabi_size; 5745 5746 u.xsave = memdup_user(argp, size); 5747 if (IS_ERR(u.xsave)) { 5748 r = PTR_ERR(u.xsave); 5749 goto out_nofree; 5750 } 5751 5752 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); 5753 break; 5754 } 5755 5756 case KVM_GET_XSAVE2: { 5757 int size = vcpu->arch.guest_fpu.uabi_size; 5758 5759 u.xsave = kzalloc(size, GFP_KERNEL_ACCOUNT); 5760 r = -ENOMEM; 5761 if (!u.xsave) 5762 break; 5763 5764 kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size); 5765 5766 r = -EFAULT; 5767 if (copy_to_user(argp, u.xsave, size)) 5768 break; 5769 5770 r = 0; 5771 break; 5772 } 5773 5774 case KVM_GET_XCRS: { 5775 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT); 5776 r = -ENOMEM; 5777 if (!u.xcrs) 5778 break; 5779 5780 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); 5781 5782 r = -EFAULT; 5783 if (copy_to_user(argp, u.xcrs, 5784 sizeof(struct kvm_xcrs))) 5785 break; 5786 r = 0; 5787 
break; 5788 } 5789 case KVM_SET_XCRS: { 5790 u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); 5791 if (IS_ERR(u.xcrs)) { 5792 r = PTR_ERR(u.xcrs); 5793 goto out_nofree; 5794 } 5795 5796 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); 5797 break; 5798 } 5799 case KVM_SET_TSC_KHZ: { 5800 u32 user_tsc_khz; 5801 5802 r = -EINVAL; 5803 user_tsc_khz = (u32)arg; 5804 5805 if (kvm_caps.has_tsc_control && 5806 user_tsc_khz >= kvm_caps.max_guest_tsc_khz) 5807 goto out; 5808 5809 if (user_tsc_khz == 0) 5810 user_tsc_khz = tsc_khz; 5811 5812 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) 5813 r = 0; 5814 5815 goto out; 5816 } 5817 case KVM_GET_TSC_KHZ: { 5818 r = vcpu->arch.virtual_tsc_khz; 5819 goto out; 5820 } 5821 case KVM_KVMCLOCK_CTRL: { 5822 r = kvm_set_guest_paused(vcpu); 5823 goto out; 5824 } 5825 case KVM_ENABLE_CAP: { 5826 struct kvm_enable_cap cap; 5827 5828 r = -EFAULT; 5829 if (copy_from_user(&cap, argp, sizeof(cap))) 5830 goto out; 5831 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 5832 break; 5833 } 5834 case KVM_GET_NESTED_STATE: { 5835 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5836 u32 user_data_size; 5837 5838 r = -EINVAL; 5839 if (!kvm_x86_ops.nested_ops->get_state) 5840 break; 5841 5842 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); 5843 r = -EFAULT; 5844 if (get_user(user_data_size, &user_kvm_nested_state->size)) 5845 break; 5846 5847 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, 5848 user_data_size); 5849 if (r < 0) 5850 break; 5851 5852 if (r > user_data_size) { 5853 if (put_user(r, &user_kvm_nested_state->size)) 5854 r = -EFAULT; 5855 else 5856 r = -E2BIG; 5857 break; 5858 } 5859 5860 r = 0; 5861 break; 5862 } 5863 case KVM_SET_NESTED_STATE: { 5864 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5865 struct kvm_nested_state kvm_state; 5866 int idx; 5867 5868 r = -EINVAL; 5869 if (!kvm_x86_ops.nested_ops->set_state) 5870 break; 5871 5872 r = -EFAULT; 5873 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) 5874 break; 5875 5876 r = -EINVAL; 5877 if (kvm_state.size < sizeof(kvm_state)) 5878 break; 5879 5880 if (kvm_state.flags & 5881 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE 5882 | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING 5883 | KVM_STATE_NESTED_GIF_SET)) 5884 break; 5885 5886 /* nested_run_pending implies guest_mode. 
*/ 5887 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING) 5888 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) 5889 break; 5890 5891 idx = srcu_read_lock(&vcpu->kvm->srcu); 5892 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); 5893 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5894 break; 5895 } 5896 case KVM_GET_SUPPORTED_HV_CPUID: 5897 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); 5898 break; 5899 #ifdef CONFIG_KVM_XEN 5900 case KVM_XEN_VCPU_GET_ATTR: { 5901 struct kvm_xen_vcpu_attr xva; 5902 5903 r = -EFAULT; 5904 if (copy_from_user(&xva, argp, sizeof(xva))) 5905 goto out; 5906 r = kvm_xen_vcpu_get_attr(vcpu, &xva); 5907 if (!r && copy_to_user(argp, &xva, sizeof(xva))) 5908 r = -EFAULT; 5909 break; 5910 } 5911 case KVM_XEN_VCPU_SET_ATTR: { 5912 struct kvm_xen_vcpu_attr xva; 5913 5914 r = -EFAULT; 5915 if (copy_from_user(&xva, argp, sizeof(xva))) 5916 goto out; 5917 r = kvm_xen_vcpu_set_attr(vcpu, &xva); 5918 break; 5919 } 5920 #endif 5921 case KVM_GET_SREGS2: { 5922 u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL); 5923 r = -ENOMEM; 5924 if (!u.sregs2) 5925 goto out; 5926 __get_sregs2(vcpu, u.sregs2); 5927 r = -EFAULT; 5928 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2))) 5929 goto out; 5930 r = 0; 5931 break; 5932 } 5933 case KVM_SET_SREGS2: { 5934 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2)); 5935 if (IS_ERR(u.sregs2)) { 5936 r = PTR_ERR(u.sregs2); 5937 u.sregs2 = NULL; 5938 goto out; 5939 } 5940 r = __set_sregs2(vcpu, u.sregs2); 5941 break; 5942 } 5943 case KVM_HAS_DEVICE_ATTR: 5944 case KVM_GET_DEVICE_ATTR: 5945 case KVM_SET_DEVICE_ATTR: 5946 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp); 5947 break; 5948 default: 5949 r = -EINVAL; 5950 } 5951 out: 5952 kfree(u.buffer); 5953 out_nofree: 5954 vcpu_put(vcpu); 5955 return r; 5956 } 5957 5958 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 5959 { 5960 return VM_FAULT_SIGBUS; 5961 } 5962 5963 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) 5964 { 5965 int ret; 5966 5967 if (addr > (unsigned int)(-3 * PAGE_SIZE)) 5968 return -EINVAL; 5969 ret = static_call(kvm_x86_set_tss_addr)(kvm, addr); 5970 return ret; 5971 } 5972 5973 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, 5974 u64 ident_addr) 5975 { 5976 return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr); 5977 } 5978 5979 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, 5980 unsigned long kvm_nr_mmu_pages) 5981 { 5982 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) 5983 return -EINVAL; 5984 5985 mutex_lock(&kvm->slots_lock); 5986 5987 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); 5988 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; 5989 5990 mutex_unlock(&kvm->slots_lock); 5991 return 0; 5992 } 5993 5994 static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) 5995 { 5996 return kvm->arch.n_max_mmu_pages; 5997 } 5998 5999 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 6000 { 6001 struct kvm_pic *pic = kvm->arch.vpic; 6002 int r; 6003 6004 r = 0; 6005 switch (chip->chip_id) { 6006 case KVM_IRQCHIP_PIC_MASTER: 6007 memcpy(&chip->chip.pic, &pic->pics[0], 6008 sizeof(struct kvm_pic_state)); 6009 break; 6010 case KVM_IRQCHIP_PIC_SLAVE: 6011 memcpy(&chip->chip.pic, &pic->pics[1], 6012 sizeof(struct kvm_pic_state)); 6013 break; 6014 case KVM_IRQCHIP_IOAPIC: 6015 kvm_get_ioapic(kvm, &chip->chip.ioapic); 6016 break; 6017 default: 6018 r = -EINVAL; 6019 break; 6020 } 6021 return r; 6022 
} 6023 6024 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 6025 { 6026 struct kvm_pic *pic = kvm->arch.vpic; 6027 int r; 6028 6029 r = 0; 6030 switch (chip->chip_id) { 6031 case KVM_IRQCHIP_PIC_MASTER: 6032 spin_lock(&pic->lock); 6033 memcpy(&pic->pics[0], &chip->chip.pic, 6034 sizeof(struct kvm_pic_state)); 6035 spin_unlock(&pic->lock); 6036 break; 6037 case KVM_IRQCHIP_PIC_SLAVE: 6038 spin_lock(&pic->lock); 6039 memcpy(&pic->pics[1], &chip->chip.pic, 6040 sizeof(struct kvm_pic_state)); 6041 spin_unlock(&pic->lock); 6042 break; 6043 case KVM_IRQCHIP_IOAPIC: 6044 kvm_set_ioapic(kvm, &chip->chip.ioapic); 6045 break; 6046 default: 6047 r = -EINVAL; 6048 break; 6049 } 6050 kvm_pic_update_irq(pic); 6051 return r; 6052 } 6053 6054 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) 6055 { 6056 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; 6057 6058 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); 6059 6060 mutex_lock(&kps->lock); 6061 memcpy(ps, &kps->channels, sizeof(*ps)); 6062 mutex_unlock(&kps->lock); 6063 return 0; 6064 } 6065 6066 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 6067 { 6068 int i; 6069 struct kvm_pit *pit = kvm->arch.vpit; 6070 6071 mutex_lock(&pit->pit_state.lock); 6072 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); 6073 for (i = 0; i < 3; i++) 6074 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); 6075 mutex_unlock(&pit->pit_state.lock); 6076 return 0; 6077 } 6078 6079 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 6080 { 6081 mutex_lock(&kvm->arch.vpit->pit_state.lock); 6082 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, 6083 sizeof(ps->channels)); 6084 ps->flags = kvm->arch.vpit->pit_state.flags; 6085 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 6086 memset(&ps->reserved, 0, sizeof(ps->reserved)); 6087 return 0; 6088 } 6089 6090 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 6091 { 6092 int start = 0; 6093 int i; 6094 u32 prev_legacy, cur_legacy; 6095 struct kvm_pit *pit = kvm->arch.vpit; 6096 6097 mutex_lock(&pit->pit_state.lock); 6098 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 6099 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; 6100 if (!prev_legacy && cur_legacy) 6101 start = 1; 6102 memcpy(&pit->pit_state.channels, &ps->channels, 6103 sizeof(pit->pit_state.channels)); 6104 pit->pit_state.flags = ps->flags; 6105 for (i = 0; i < 3; i++) 6106 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, 6107 start && i == 0); 6108 mutex_unlock(&pit->pit_state.lock); 6109 return 0; 6110 } 6111 6112 static int kvm_vm_ioctl_reinject(struct kvm *kvm, 6113 struct kvm_reinject_control *control) 6114 { 6115 struct kvm_pit *pit = kvm->arch.vpit; 6116 6117 /* pit->pit_state.lock was overloaded to prevent userspace from getting 6118 * an inconsistent state after running multiple KVM_REINJECT_CONTROL 6119 * ioctls in parallel. Use a separate lock if that ioctl isn't rare. 6120 */ 6121 mutex_lock(&pit->pit_state.lock); 6122 kvm_pit_set_reinject(pit, control->pit_reinject); 6123 mutex_unlock(&pit->pit_state.lock); 6124 6125 return 0; 6126 } 6127 6128 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) 6129 { 6130 6131 /* 6132 * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called 6133 * before reporting dirty_bitmap to userspace. KVM flushes the buffers 6134 * on all VM-Exits, thus we only need to kick running vCPUs to force a 6135 * VM-Exit. 
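	 *
	 * (On VMX, for example, the page-modification log is flushed on
	 * VM-Exit, so a kicked vCPU publishes its pending dirty GFNs to the
	 * dirty_bitmap before re-entering the guest.)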
6136 */ 6137 struct kvm_vcpu *vcpu; 6138 unsigned long i; 6139 6140 kvm_for_each_vcpu(i, vcpu, kvm) 6141 kvm_vcpu_kick(vcpu); 6142 } 6143 6144 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, 6145 bool line_status) 6146 { 6147 if (!irqchip_in_kernel(kvm)) 6148 return -ENXIO; 6149 6150 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 6151 irq_event->irq, irq_event->level, 6152 line_status); 6153 return 0; 6154 } 6155 6156 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, 6157 struct kvm_enable_cap *cap) 6158 { 6159 int r; 6160 6161 if (cap->flags) 6162 return -EINVAL; 6163 6164 switch (cap->cap) { 6165 case KVM_CAP_DISABLE_QUIRKS2: 6166 r = -EINVAL; 6167 if (cap->args[0] & ~KVM_X86_VALID_QUIRKS) 6168 break; 6169 fallthrough; 6170 case KVM_CAP_DISABLE_QUIRKS: 6171 kvm->arch.disabled_quirks = cap->args[0]; 6172 r = 0; 6173 break; 6174 case KVM_CAP_SPLIT_IRQCHIP: { 6175 mutex_lock(&kvm->lock); 6176 r = -EINVAL; 6177 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) 6178 goto split_irqchip_unlock; 6179 r = -EEXIST; 6180 if (irqchip_in_kernel(kvm)) 6181 goto split_irqchip_unlock; 6182 if (kvm->created_vcpus) 6183 goto split_irqchip_unlock; 6184 r = kvm_setup_empty_irq_routing(kvm); 6185 if (r) 6186 goto split_irqchip_unlock; 6187 /* Pairs with irqchip_in_kernel. */ 6188 smp_wmb(); 6189 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; 6190 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; 6191 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6192 r = 0; 6193 split_irqchip_unlock: 6194 mutex_unlock(&kvm->lock); 6195 break; 6196 } 6197 case KVM_CAP_X2APIC_API: 6198 r = -EINVAL; 6199 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) 6200 break; 6201 6202 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) 6203 kvm->arch.x2apic_format = true; 6204 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) 6205 kvm->arch.x2apic_broadcast_quirk_disabled = true; 6206 6207 r = 0; 6208 break; 6209 case KVM_CAP_X86_DISABLE_EXITS: 6210 r = -EINVAL; 6211 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) 6212 break; 6213 6214 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && 6215 kvm_can_mwait_in_guest()) 6216 kvm->arch.mwait_in_guest = true; 6217 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) 6218 kvm->arch.hlt_in_guest = true; 6219 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) 6220 kvm->arch.pause_in_guest = true; 6221 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) 6222 kvm->arch.cstate_in_guest = true; 6223 r = 0; 6224 break; 6225 case KVM_CAP_MSR_PLATFORM_INFO: 6226 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; 6227 r = 0; 6228 break; 6229 case KVM_CAP_EXCEPTION_PAYLOAD: 6230 kvm->arch.exception_payload_enabled = cap->args[0]; 6231 r = 0; 6232 break; 6233 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 6234 kvm->arch.triple_fault_event = cap->args[0]; 6235 r = 0; 6236 break; 6237 case KVM_CAP_X86_USER_SPACE_MSR: 6238 r = -EINVAL; 6239 if (cap->args[0] & ~KVM_MSR_EXIT_REASON_VALID_MASK) 6240 break; 6241 kvm->arch.user_space_msr_mask = cap->args[0]; 6242 r = 0; 6243 break; 6244 case KVM_CAP_X86_BUS_LOCK_EXIT: 6245 r = -EINVAL; 6246 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) 6247 break; 6248 6249 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && 6250 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) 6251 break; 6252 6253 if (kvm_caps.has_bus_lock_exit && 6254 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) 6255 kvm->arch.bus_lock_detection_enabled = true; 6256 r = 0; 6257 break; 6258 #ifdef CONFIG_X86_SGX_KVM 6259 case KVM_CAP_SGX_ATTRIBUTE: { 6260 unsigned 
long allowed_attributes = 0; 6261 6262 r = sgx_set_attribute(&allowed_attributes, cap->args[0]); 6263 if (r) 6264 break; 6265 6266 /* KVM only supports the PROVISIONKEY privileged attribute. */ 6267 if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) && 6268 !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY)) 6269 kvm->arch.sgx_provisioning_allowed = true; 6270 else 6271 r = -EINVAL; 6272 break; 6273 } 6274 #endif 6275 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 6276 r = -EINVAL; 6277 if (!kvm_x86_ops.vm_copy_enc_context_from) 6278 break; 6279 6280 r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]); 6281 break; 6282 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 6283 r = -EINVAL; 6284 if (!kvm_x86_ops.vm_move_enc_context_from) 6285 break; 6286 6287 r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]); 6288 break; 6289 case KVM_CAP_EXIT_HYPERCALL: 6290 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) { 6291 r = -EINVAL; 6292 break; 6293 } 6294 kvm->arch.hypercall_exit_enabled = cap->args[0]; 6295 r = 0; 6296 break; 6297 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 6298 r = -EINVAL; 6299 if (cap->args[0] & ~1) 6300 break; 6301 kvm->arch.exit_on_emulation_error = cap->args[0]; 6302 r = 0; 6303 break; 6304 case KVM_CAP_PMU_CAPABILITY: 6305 r = -EINVAL; 6306 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK)) 6307 break; 6308 6309 mutex_lock(&kvm->lock); 6310 if (!kvm->created_vcpus) { 6311 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE); 6312 r = 0; 6313 } 6314 mutex_unlock(&kvm->lock); 6315 break; 6316 case KVM_CAP_MAX_VCPU_ID: 6317 r = -EINVAL; 6318 if (cap->args[0] > KVM_MAX_VCPU_IDS) 6319 break; 6320 6321 mutex_lock(&kvm->lock); 6322 if (kvm->arch.max_vcpu_ids == cap->args[0]) { 6323 r = 0; 6324 } else if (!kvm->arch.max_vcpu_ids) { 6325 kvm->arch.max_vcpu_ids = cap->args[0]; 6326 r = 0; 6327 } 6328 mutex_unlock(&kvm->lock); 6329 break; 6330 case KVM_CAP_X86_NOTIFY_VMEXIT: 6331 r = -EINVAL; 6332 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS) 6333 break; 6334 if (!kvm_caps.has_notify_vmexit) 6335 break; 6336 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED)) 6337 break; 6338 mutex_lock(&kvm->lock); 6339 if (!kvm->created_vcpus) { 6340 kvm->arch.notify_window = cap->args[0] >> 32; 6341 kvm->arch.notify_vmexit_flags = (u32)cap->args[0]; 6342 r = 0; 6343 } 6344 mutex_unlock(&kvm->lock); 6345 break; 6346 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 6347 r = -EINVAL; 6348 6349 /* 6350 * Since the risk of disabling NX hugepages is a guest crashing 6351 * the system, ensure the userspace process has permission to 6352 * reboot the system. 6353 * 6354 * Note that unlike the reboot() syscall, the process must have 6355 * this capability in the root namespace because exposing 6356 * /dev/kvm into a container does not limit the scope of the 6357 * iTLB multihit bug to that container. In other words, 6358 * this must use capable(), not ns_capable(). 
6359 */ 6360 if (!capable(CAP_SYS_BOOT)) { 6361 r = -EPERM; 6362 break; 6363 } 6364 6365 if (cap->args[0]) 6366 break; 6367 6368 mutex_lock(&kvm->lock); 6369 if (!kvm->created_vcpus) { 6370 kvm->arch.disable_nx_huge_pages = true; 6371 r = 0; 6372 } 6373 mutex_unlock(&kvm->lock); 6374 break; 6375 default: 6376 r = -EINVAL; 6377 break; 6378 } 6379 return r; 6380 } 6381 6382 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow) 6383 { 6384 struct kvm_x86_msr_filter *msr_filter; 6385 6386 msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT); 6387 if (!msr_filter) 6388 return NULL; 6389 6390 msr_filter->default_allow = default_allow; 6391 return msr_filter; 6392 } 6393 6394 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter) 6395 { 6396 u32 i; 6397 6398 if (!msr_filter) 6399 return; 6400 6401 for (i = 0; i < msr_filter->count; i++) 6402 kfree(msr_filter->ranges[i].bitmap); 6403 6404 kfree(msr_filter); 6405 } 6406 6407 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, 6408 struct kvm_msr_filter_range *user_range) 6409 { 6410 unsigned long *bitmap = NULL; 6411 size_t bitmap_size; 6412 6413 if (!user_range->nmsrs) 6414 return 0; 6415 6416 if (user_range->flags & ~KVM_MSR_FILTER_RANGE_VALID_MASK) 6417 return -EINVAL; 6418 6419 if (!user_range->flags) 6420 return -EINVAL; 6421 6422 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); 6423 if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE) 6424 return -EINVAL; 6425 6426 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); 6427 if (IS_ERR(bitmap)) 6428 return PTR_ERR(bitmap); 6429 6430 msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) { 6431 .flags = user_range->flags, 6432 .base = user_range->base, 6433 .nmsrs = user_range->nmsrs, 6434 .bitmap = bitmap, 6435 }; 6436 6437 msr_filter->count++; 6438 return 0; 6439 } 6440 6441 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, 6442 struct kvm_msr_filter *filter) 6443 { 6444 struct kvm_x86_msr_filter *new_filter, *old_filter; 6445 bool default_allow; 6446 bool empty = true; 6447 int r = 0; 6448 u32 i; 6449 6450 if (filter->flags & ~KVM_MSR_FILTER_VALID_MASK) 6451 return -EINVAL; 6452 6453 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) 6454 empty &= !filter->ranges[i].nmsrs; 6455 6456 default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); 6457 if (empty && !default_allow) 6458 return -EINVAL; 6459 6460 new_filter = kvm_alloc_msr_filter(default_allow); 6461 if (!new_filter) 6462 return -ENOMEM; 6463 6464 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { 6465 r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); 6466 if (r) { 6467 kvm_free_msr_filter(new_filter); 6468 return r; 6469 } 6470 } 6471 6472 mutex_lock(&kvm->lock); 6473 6474 /* The per-VM filter is protected by kvm->lock... 
*/ 6475 old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); 6476 6477 rcu_assign_pointer(kvm->arch.msr_filter, new_filter); 6478 synchronize_srcu(&kvm->srcu); 6479 6480 kvm_free_msr_filter(old_filter); 6481 6482 kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED); 6483 mutex_unlock(&kvm->lock); 6484 6485 return 0; 6486 } 6487 6488 #ifdef CONFIG_KVM_COMPAT 6489 /* for KVM_X86_SET_MSR_FILTER */ 6490 struct kvm_msr_filter_range_compat { 6491 __u32 flags; 6492 __u32 nmsrs; 6493 __u32 base; 6494 __u32 bitmap; 6495 }; 6496 6497 struct kvm_msr_filter_compat { 6498 __u32 flags; 6499 struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES]; 6500 }; 6501 6502 #define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat) 6503 6504 long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, 6505 unsigned long arg) 6506 { 6507 void __user *argp = (void __user *)arg; 6508 struct kvm *kvm = filp->private_data; 6509 long r = -ENOTTY; 6510 6511 switch (ioctl) { 6512 case KVM_X86_SET_MSR_FILTER_COMPAT: { 6513 struct kvm_msr_filter __user *user_msr_filter = argp; 6514 struct kvm_msr_filter_compat filter_compat; 6515 struct kvm_msr_filter filter; 6516 int i; 6517 6518 if (copy_from_user(&filter_compat, user_msr_filter, 6519 sizeof(filter_compat))) 6520 return -EFAULT; 6521 6522 filter.flags = filter_compat.flags; 6523 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { 6524 struct kvm_msr_filter_range_compat *cr; 6525 6526 cr = &filter_compat.ranges[i]; 6527 filter.ranges[i] = (struct kvm_msr_filter_range) { 6528 .flags = cr->flags, 6529 .nmsrs = cr->nmsrs, 6530 .base = cr->base, 6531 .bitmap = (__u8 *)(ulong)cr->bitmap, 6532 }; 6533 } 6534 6535 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); 6536 break; 6537 } 6538 } 6539 6540 return r; 6541 } 6542 #endif 6543 6544 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 6545 static int kvm_arch_suspend_notifier(struct kvm *kvm) 6546 { 6547 struct kvm_vcpu *vcpu; 6548 unsigned long i; 6549 int ret = 0; 6550 6551 mutex_lock(&kvm->lock); 6552 kvm_for_each_vcpu(i, vcpu, kvm) { 6553 if (!vcpu->arch.pv_time.active) 6554 continue; 6555 6556 ret = kvm_set_guest_paused(vcpu); 6557 if (ret) { 6558 kvm_err("Failed to pause guest VCPU%d: %d\n", 6559 vcpu->vcpu_id, ret); 6560 break; 6561 } 6562 } 6563 mutex_unlock(&kvm->lock); 6564 6565 return ret ? NOTIFY_BAD : NOTIFY_DONE; 6566 } 6567 6568 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state) 6569 { 6570 switch (state) { 6571 case PM_HIBERNATION_PREPARE: 6572 case PM_SUSPEND_PREPARE: 6573 return kvm_arch_suspend_notifier(kvm); 6574 } 6575 6576 return NOTIFY_DONE; 6577 } 6578 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 6579 6580 static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp) 6581 { 6582 struct kvm_clock_data data = { 0 }; 6583 6584 get_kvmclock(kvm, &data); 6585 if (copy_to_user(argp, &data, sizeof(data))) 6586 return -EFAULT; 6587 6588 return 0; 6589 } 6590 6591 static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp) 6592 { 6593 struct kvm_arch *ka = &kvm->arch; 6594 struct kvm_clock_data data; 6595 u64 now_raw_ns; 6596 6597 if (copy_from_user(&data, argp, sizeof(data))) 6598 return -EFAULT; 6599 6600 /* 6601 * Only KVM_CLOCK_REALTIME is used, but allow passing the 6602 * result of KVM_GET_CLOCK back to KVM_SET_CLOCK. 
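	 *
	 * A VMM restoring the clock across migration might, illustratively,
	 * do:
	 *
	 *	struct kvm_clock_data data;
	 *
	 *	ioctl(src_vm_fd, KVM_GET_CLOCK, &data);
	 *	(transfer 'data' to the destination)
	 *	ioctl(dst_vm_fd, KVM_SET_CLOCK, &data);
	 *
	 * Valid flags other than KVM_CLOCK_REALTIME (e.g. KVM_CLOCK_HOST_TSC)
	 * are accepted here but ignored.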
6603 */ 6604 if (data.flags & ~KVM_CLOCK_VALID_FLAGS) 6605 return -EINVAL; 6606 6607 kvm_hv_request_tsc_page_update(kvm); 6608 kvm_start_pvclock_update(kvm); 6609 pvclock_update_vm_gtod_copy(kvm); 6610 6611 /* 6612 * This pairs with kvm_guest_time_update(): when masterclock is 6613 * in use, we use master_kernel_ns + kvmclock_offset to set 6614 * unsigned 'system_time' so if we use get_kvmclock_ns() (which 6615 * is slightly ahead) here we risk going negative on unsigned 6616 * 'system_time' when 'data.clock' is very small. 6617 */ 6618 if (data.flags & KVM_CLOCK_REALTIME) { 6619 u64 now_real_ns = ktime_get_real_ns(); 6620 6621 /* 6622 * Avoid stepping the kvmclock backwards. 6623 */ 6624 if (now_real_ns > data.realtime) 6625 data.clock += now_real_ns - data.realtime; 6626 } 6627 6628 if (ka->use_master_clock) 6629 now_raw_ns = ka->master_kernel_ns; 6630 else 6631 now_raw_ns = get_kvmclock_base_ns(); 6632 ka->kvmclock_offset = data.clock - now_raw_ns; 6633 kvm_end_pvclock_update(kvm); 6634 return 0; 6635 } 6636 6637 long kvm_arch_vm_ioctl(struct file *filp, 6638 unsigned int ioctl, unsigned long arg) 6639 { 6640 struct kvm *kvm = filp->private_data; 6641 void __user *argp = (void __user *)arg; 6642 int r = -ENOTTY; 6643 /* 6644 * This union makes it completely explicit to gcc-3.x 6645 * that these two variables' stack usage should be 6646 * combined, not added together. 6647 */ 6648 union { 6649 struct kvm_pit_state ps; 6650 struct kvm_pit_state2 ps2; 6651 struct kvm_pit_config pit_config; 6652 } u; 6653 6654 switch (ioctl) { 6655 case KVM_SET_TSS_ADDR: 6656 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); 6657 break; 6658 case KVM_SET_IDENTITY_MAP_ADDR: { 6659 u64 ident_addr; 6660 6661 mutex_lock(&kvm->lock); 6662 r = -EINVAL; 6663 if (kvm->created_vcpus) 6664 goto set_identity_unlock; 6665 r = -EFAULT; 6666 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr))) 6667 goto set_identity_unlock; 6668 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 6669 set_identity_unlock: 6670 mutex_unlock(&kvm->lock); 6671 break; 6672 } 6673 case KVM_SET_NR_MMU_PAGES: 6674 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); 6675 break; 6676 case KVM_GET_NR_MMU_PAGES: 6677 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); 6678 break; 6679 case KVM_CREATE_IRQCHIP: { 6680 mutex_lock(&kvm->lock); 6681 6682 r = -EEXIST; 6683 if (irqchip_in_kernel(kvm)) 6684 goto create_irqchip_unlock; 6685 6686 r = -EINVAL; 6687 if (kvm->created_vcpus) 6688 goto create_irqchip_unlock; 6689 6690 r = kvm_pic_init(kvm); 6691 if (r) 6692 goto create_irqchip_unlock; 6693 6694 r = kvm_ioapic_init(kvm); 6695 if (r) { 6696 kvm_pic_destroy(kvm); 6697 goto create_irqchip_unlock; 6698 } 6699 6700 r = kvm_setup_default_irq_routing(kvm); 6701 if (r) { 6702 kvm_ioapic_destroy(kvm); 6703 kvm_pic_destroy(kvm); 6704 goto create_irqchip_unlock; 6705 } 6706 /* Write kvm->irq_routing before enabling irqchip_in_kernel. 
*/ 6707 smp_wmb(); 6708 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; 6709 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6710 create_irqchip_unlock: 6711 mutex_unlock(&kvm->lock); 6712 break; 6713 } 6714 case KVM_CREATE_PIT: 6715 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; 6716 goto create_pit; 6717 case KVM_CREATE_PIT2: 6718 r = -EFAULT; 6719 if (copy_from_user(&u.pit_config, argp, 6720 sizeof(struct kvm_pit_config))) 6721 goto out; 6722 create_pit: 6723 mutex_lock(&kvm->lock); 6724 r = -EEXIST; 6725 if (kvm->arch.vpit) 6726 goto create_pit_unlock; 6727 r = -ENOMEM; 6728 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); 6729 if (kvm->arch.vpit) 6730 r = 0; 6731 create_pit_unlock: 6732 mutex_unlock(&kvm->lock); 6733 break; 6734 case KVM_GET_IRQCHIP: { 6735 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6736 struct kvm_irqchip *chip; 6737 6738 chip = memdup_user(argp, sizeof(*chip)); 6739 if (IS_ERR(chip)) { 6740 r = PTR_ERR(chip); 6741 goto out; 6742 } 6743 6744 r = -ENXIO; 6745 if (!irqchip_kernel(kvm)) 6746 goto get_irqchip_out; 6747 r = kvm_vm_ioctl_get_irqchip(kvm, chip); 6748 if (r) 6749 goto get_irqchip_out; 6750 r = -EFAULT; 6751 if (copy_to_user(argp, chip, sizeof(*chip))) 6752 goto get_irqchip_out; 6753 r = 0; 6754 get_irqchip_out: 6755 kfree(chip); 6756 break; 6757 } 6758 case KVM_SET_IRQCHIP: { 6759 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6760 struct kvm_irqchip *chip; 6761 6762 chip = memdup_user(argp, sizeof(*chip)); 6763 if (IS_ERR(chip)) { 6764 r = PTR_ERR(chip); 6765 goto out; 6766 } 6767 6768 r = -ENXIO; 6769 if (!irqchip_kernel(kvm)) 6770 goto set_irqchip_out; 6771 r = kvm_vm_ioctl_set_irqchip(kvm, chip); 6772 set_irqchip_out: 6773 kfree(chip); 6774 break; 6775 } 6776 case KVM_GET_PIT: { 6777 r = -EFAULT; 6778 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) 6779 goto out; 6780 r = -ENXIO; 6781 if (!kvm->arch.vpit) 6782 goto out; 6783 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); 6784 if (r) 6785 goto out; 6786 r = -EFAULT; 6787 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) 6788 goto out; 6789 r = 0; 6790 break; 6791 } 6792 case KVM_SET_PIT: { 6793 r = -EFAULT; 6794 if (copy_from_user(&u.ps, argp, sizeof(u.ps))) 6795 goto out; 6796 mutex_lock(&kvm->lock); 6797 r = -ENXIO; 6798 if (!kvm->arch.vpit) 6799 goto set_pit_out; 6800 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); 6801 set_pit_out: 6802 mutex_unlock(&kvm->lock); 6803 break; 6804 } 6805 case KVM_GET_PIT2: { 6806 r = -ENXIO; 6807 if (!kvm->arch.vpit) 6808 goto out; 6809 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); 6810 if (r) 6811 goto out; 6812 r = -EFAULT; 6813 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) 6814 goto out; 6815 r = 0; 6816 break; 6817 } 6818 case KVM_SET_PIT2: { 6819 r = -EFAULT; 6820 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) 6821 goto out; 6822 mutex_lock(&kvm->lock); 6823 r = -ENXIO; 6824 if (!kvm->arch.vpit) 6825 goto set_pit2_out; 6826 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); 6827 set_pit2_out: 6828 mutex_unlock(&kvm->lock); 6829 break; 6830 } 6831 case KVM_REINJECT_CONTROL: { 6832 struct kvm_reinject_control control; 6833 r = -EFAULT; 6834 if (copy_from_user(&control, argp, sizeof(control))) 6835 goto out; 6836 r = -ENXIO; 6837 if (!kvm->arch.vpit) 6838 goto out; 6839 r = kvm_vm_ioctl_reinject(kvm, &control); 6840 break; 6841 } 6842 case KVM_SET_BOOT_CPU_ID: 6843 r = 0; 6844 mutex_lock(&kvm->lock); 6845 if (kvm->created_vcpus) 6846 r = -EBUSY; 6847 else 6848 kvm->arch.bsp_vcpu_id = arg; 6849 mutex_unlock(&kvm->lock); 6850 break; 6851 #ifdef CONFIG_KVM_XEN 
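	/*
	 * The Xen HVM ioctls below configure Xen hypercall interception and
	 * per-VM Xen attributes. A minimal, hypothetical sketch of a VMM
	 * enabling the hypercall MSR:
	 *
	 *	struct kvm_xen_hvm_config xhc = { .msr = 0x40000000 };
	 *
	 *	ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &xhc);
	 *
	 * where 0x40000000 stands in for whichever MSR index the VMM exposes
	 * to the guest for hypercall page setup.
	 */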
6852 case KVM_XEN_HVM_CONFIG: { 6853 struct kvm_xen_hvm_config xhc; 6854 r = -EFAULT; 6855 if (copy_from_user(&xhc, argp, sizeof(xhc))) 6856 goto out; 6857 r = kvm_xen_hvm_config(kvm, &xhc); 6858 break; 6859 } 6860 case KVM_XEN_HVM_GET_ATTR: { 6861 struct kvm_xen_hvm_attr xha; 6862 6863 r = -EFAULT; 6864 if (copy_from_user(&xha, argp, sizeof(xha))) 6865 goto out; 6866 r = kvm_xen_hvm_get_attr(kvm, &xha); 6867 if (!r && copy_to_user(argp, &xha, sizeof(xha))) 6868 r = -EFAULT; 6869 break; 6870 } 6871 case KVM_XEN_HVM_SET_ATTR: { 6872 struct kvm_xen_hvm_attr xha; 6873 6874 r = -EFAULT; 6875 if (copy_from_user(&xha, argp, sizeof(xha))) 6876 goto out; 6877 r = kvm_xen_hvm_set_attr(kvm, &xha); 6878 break; 6879 } 6880 case KVM_XEN_HVM_EVTCHN_SEND: { 6881 struct kvm_irq_routing_xen_evtchn uxe; 6882 6883 r = -EFAULT; 6884 if (copy_from_user(&uxe, argp, sizeof(uxe))) 6885 goto out; 6886 r = kvm_xen_hvm_evtchn_send(kvm, &uxe); 6887 break; 6888 } 6889 #endif 6890 case KVM_SET_CLOCK: 6891 r = kvm_vm_ioctl_set_clock(kvm, argp); 6892 break; 6893 case KVM_GET_CLOCK: 6894 r = kvm_vm_ioctl_get_clock(kvm, argp); 6895 break; 6896 case KVM_SET_TSC_KHZ: { 6897 u32 user_tsc_khz; 6898 6899 r = -EINVAL; 6900 user_tsc_khz = (u32)arg; 6901 6902 if (kvm_caps.has_tsc_control && 6903 user_tsc_khz >= kvm_caps.max_guest_tsc_khz) 6904 goto out; 6905 6906 if (user_tsc_khz == 0) 6907 user_tsc_khz = tsc_khz; 6908 6909 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz); 6910 r = 0; 6911 6912 goto out; 6913 } 6914 case KVM_GET_TSC_KHZ: { 6915 r = READ_ONCE(kvm->arch.default_tsc_khz); 6916 goto out; 6917 } 6918 case KVM_MEMORY_ENCRYPT_OP: { 6919 r = -ENOTTY; 6920 if (!kvm_x86_ops.mem_enc_ioctl) 6921 goto out; 6922 6923 r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp); 6924 break; 6925 } 6926 case KVM_MEMORY_ENCRYPT_REG_REGION: { 6927 struct kvm_enc_region region; 6928 6929 r = -EFAULT; 6930 if (copy_from_user(&region, argp, sizeof(region))) 6931 goto out; 6932 6933 r = -ENOTTY; 6934 if (!kvm_x86_ops.mem_enc_register_region) 6935 goto out; 6936 6937 r = static_call(kvm_x86_mem_enc_register_region)(kvm, &region); 6938 break; 6939 } 6940 case KVM_MEMORY_ENCRYPT_UNREG_REGION: { 6941 struct kvm_enc_region region; 6942 6943 r = -EFAULT; 6944 if (copy_from_user(&region, argp, sizeof(region))) 6945 goto out; 6946 6947 r = -ENOTTY; 6948 if (!kvm_x86_ops.mem_enc_unregister_region) 6949 goto out; 6950 6951 r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region); 6952 break; 6953 } 6954 case KVM_HYPERV_EVENTFD: { 6955 struct kvm_hyperv_eventfd hvevfd; 6956 6957 r = -EFAULT; 6958 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd))) 6959 goto out; 6960 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd); 6961 break; 6962 } 6963 case KVM_SET_PMU_EVENT_FILTER: 6964 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); 6965 break; 6966 case KVM_X86_SET_MSR_FILTER: { 6967 struct kvm_msr_filter __user *user_msr_filter = argp; 6968 struct kvm_msr_filter filter; 6969 6970 if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) 6971 return -EFAULT; 6972 6973 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); 6974 break; 6975 } 6976 default: 6977 r = -ENOTTY; 6978 } 6979 out: 6980 return r; 6981 } 6982 6983 static void kvm_init_msr_list(void) 6984 { 6985 u32 dummy[2]; 6986 unsigned i; 6987 6988 BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3, 6989 "Please update the fixed PMCs in msrs_to_save_all[]"); 6990 6991 num_msrs_to_save = 0; 6992 num_emulated_msrs = 0; 6993 num_msr_based_features = 0; 6994 6995 for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) { 6996 if
(rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0) 6997 continue; 6998 6999 /* 7000 * Even MSRs that are valid in the host may not be exposed 7001 * to the guests in some cases. 7002 */ 7003 switch (msrs_to_save_all[i]) { 7004 case MSR_IA32_BNDCFGS: 7005 if (!kvm_mpx_supported()) 7006 continue; 7007 break; 7008 case MSR_TSC_AUX: 7009 if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) && 7010 !kvm_cpu_cap_has(X86_FEATURE_RDPID)) 7011 continue; 7012 break; 7013 case MSR_IA32_UMWAIT_CONTROL: 7014 if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) 7015 continue; 7016 break; 7017 case MSR_IA32_RTIT_CTL: 7018 case MSR_IA32_RTIT_STATUS: 7019 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) 7020 continue; 7021 break; 7022 case MSR_IA32_RTIT_CR3_MATCH: 7023 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7024 !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering)) 7025 continue; 7026 break; 7027 case MSR_IA32_RTIT_OUTPUT_BASE: 7028 case MSR_IA32_RTIT_OUTPUT_MASK: 7029 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7030 (!intel_pt_validate_hw_cap(PT_CAP_topa_output) && 7031 !intel_pt_validate_hw_cap(PT_CAP_single_range_output))) 7032 continue; 7033 break; 7034 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 7035 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7036 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >= 7037 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2) 7038 continue; 7039 break; 7040 case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX: 7041 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= 7042 min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp)) 7043 continue; 7044 break; 7045 case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX: 7046 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= 7047 min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp)) 7048 continue; 7049 break; 7050 case MSR_IA32_XFD: 7051 case MSR_IA32_XFD_ERR: 7052 if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) 7053 continue; 7054 break; 7055 default: 7056 break; 7057 } 7058 7059 msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i]; 7060 } 7061 7062 for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { 7063 if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i])) 7064 continue; 7065 7066 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; 7067 } 7068 7069 for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) { 7070 struct kvm_msr_entry msr; 7071 7072 msr.index = msr_based_features_all[i]; 7073 if (kvm_get_msr_feature(&msr)) 7074 continue; 7075 7076 msr_based_features[num_msr_based_features++] = msr_based_features_all[i]; 7077 } 7078 } 7079 7080 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, 7081 const void *v) 7082 { 7083 int handled = 0; 7084 int n; 7085 7086 do { 7087 n = min(len, 8); 7088 if (!(lapic_in_kernel(vcpu) && 7089 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) 7090 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) 7091 break; 7092 handled += n; 7093 addr += n; 7094 len -= n; 7095 v += n; 7096 } while (len); 7097 7098 return handled; 7099 } 7100 7101 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) 7102 { 7103 int handled = 0; 7104 int n; 7105 7106 do { 7107 n = min(len, 8); 7108 if (!(lapic_in_kernel(vcpu) && 7109 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, 7110 addr, n, v)) 7111 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) 7112 break; 7113 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); 7114 handled += n; 7115 addr += n; 7116 len -= n; 7117 v += n; 7118 } while (len); 7119 
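	/*
	 * 'handled' can be short of the requested length if neither the
	 * in-kernel APIC nor any device on KVM_MMIO_BUS claimed a chunk;
	 * the caller turns the unhandled remainder into MMIO fragments
	 * that are completed by userspace.
	 */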
7120 return handled; 7121 } 7122 7123 void kvm_set_segment(struct kvm_vcpu *vcpu, 7124 struct kvm_segment *var, int seg) 7125 { 7126 static_call(kvm_x86_set_segment)(vcpu, var, seg); 7127 } 7128 7129 void kvm_get_segment(struct kvm_vcpu *vcpu, 7130 struct kvm_segment *var, int seg) 7131 { 7132 static_call(kvm_x86_get_segment)(vcpu, var, seg); 7133 } 7134 7135 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, 7136 struct x86_exception *exception) 7137 { 7138 struct kvm_mmu *mmu = vcpu->arch.mmu; 7139 gpa_t t_gpa; 7140 7141 BUG_ON(!mmu_is_nested(vcpu)); 7142 7143 /* NPT walks are always user-walks */ 7144 access |= PFERR_USER_MASK; 7145 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception); 7146 7147 return t_gpa; 7148 } 7149 7150 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 7151 struct x86_exception *exception) 7152 { 7153 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7154 7155 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7156 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7157 } 7158 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); 7159 7160 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 7161 struct x86_exception *exception) 7162 { 7163 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7164 7165 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7166 access |= PFERR_WRITE_MASK; 7167 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7168 } 7169 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); 7170 7171 /* uses this to access any guest's mapped memory without checking CPL */ 7172 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, 7173 struct x86_exception *exception) 7174 { 7175 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7176 7177 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); 7178 } 7179 7180 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 7181 struct kvm_vcpu *vcpu, u64 access, 7182 struct x86_exception *exception) 7183 { 7184 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7185 void *data = val; 7186 int r = X86EMUL_CONTINUE; 7187 7188 while (bytes) { 7189 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 7190 unsigned offset = addr & (PAGE_SIZE-1); 7191 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 7192 int ret; 7193 7194 if (gpa == INVALID_GPA) 7195 return X86EMUL_PROPAGATE_FAULT; 7196 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, 7197 offset, toread); 7198 if (ret < 0) { 7199 r = X86EMUL_IO_NEEDED; 7200 goto out; 7201 } 7202 7203 bytes -= toread; 7204 data += toread; 7205 addr += toread; 7206 } 7207 out: 7208 return r; 7209 } 7210 7211 /* used for instruction fetching */ 7212 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, 7213 gva_t addr, void *val, unsigned int bytes, 7214 struct x86_exception *exception) 7215 { 7216 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7217 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7218 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7219 unsigned offset; 7220 int ret; 7221 7222 /* Inline kvm_read_guest_virt_helper for speed. 
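	 *
	 * Unlike the helper, this only handles a single page: instruction
	 * fetches are at most 15 bytes and the emulator does not request a
	 * fetch across a page boundary, which the WARN_ON below enforces
	 * by truncating the read.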
*/ 7223 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK, 7224 exception); 7225 if (unlikely(gpa == INVALID_GPA)) 7226 return X86EMUL_PROPAGATE_FAULT; 7227 7228 offset = addr & (PAGE_SIZE-1); 7229 if (WARN_ON(offset + bytes > PAGE_SIZE)) 7230 bytes = (unsigned)PAGE_SIZE - offset; 7231 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, 7232 offset, bytes); 7233 if (unlikely(ret < 0)) 7234 return X86EMUL_IO_NEEDED; 7235 7236 return X86EMUL_CONTINUE; 7237 } 7238 7239 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, 7240 gva_t addr, void *val, unsigned int bytes, 7241 struct x86_exception *exception) 7242 { 7243 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7244 7245 /* 7246 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED 7247 * is returned, but our callers are not ready for that and they blindly 7248 * call kvm_inject_page_fault. Ensure that they at least do not leak 7249 * uninitialized kernel stack memory into cr2 and error code. 7250 */ 7251 memset(exception, 0, sizeof(*exception)); 7252 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 7253 exception); 7254 } 7255 EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 7256 7257 static int emulator_read_std(struct x86_emulate_ctxt *ctxt, 7258 gva_t addr, void *val, unsigned int bytes, 7259 struct x86_exception *exception, bool system) 7260 { 7261 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7262 u64 access = 0; 7263 7264 if (system) 7265 access |= PFERR_IMPLICIT_ACCESS; 7266 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7267 access |= PFERR_USER_MASK; 7268 7269 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); 7270 } 7271 7272 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 7273 struct kvm_vcpu *vcpu, u64 access, 7274 struct x86_exception *exception) 7275 { 7276 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7277 void *data = val; 7278 int r = X86EMUL_CONTINUE; 7279 7280 while (bytes) { 7281 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 7282 unsigned offset = addr & (PAGE_SIZE-1); 7283 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 7284 int ret; 7285 7286 if (gpa == INVALID_GPA) 7287 return X86EMUL_PROPAGATE_FAULT; 7288 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); 7289 if (ret < 0) { 7290 r = X86EMUL_IO_NEEDED; 7291 goto out; 7292 } 7293 7294 bytes -= towrite; 7295 data += towrite; 7296 addr += towrite; 7297 } 7298 out: 7299 return r; 7300 } 7301 7302 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, 7303 unsigned int bytes, struct x86_exception *exception, 7304 bool system) 7305 { 7306 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7307 u64 access = PFERR_WRITE_MASK; 7308 7309 if (system) 7310 access |= PFERR_IMPLICIT_ACCESS; 7311 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7312 access |= PFERR_USER_MASK; 7313 7314 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7315 access, exception); 7316 } 7317 7318 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, 7319 unsigned int bytes, struct x86_exception *exception) 7320 { 7321 /* kvm_write_guest_virt_system can pull in tons of pages. 
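	 *
	 * Copying that much guest data through the kernel can fill the L1D
	 * cache with guest-controlled content, hence the L1TF flush request
	 * below before the next VM-entry.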
*/ 7322 vcpu->arch.l1tf_flush_l1d = true; 7323 7324 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7325 PFERR_WRITE_MASK, exception); 7326 } 7327 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 7328 7329 static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type, 7330 void *insn, int insn_len) 7331 { 7332 return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type, 7333 insn, insn_len); 7334 } 7335 7336 int handle_ud(struct kvm_vcpu *vcpu) 7337 { 7338 static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX }; 7339 int fep_flags = READ_ONCE(force_emulation_prefix); 7340 int emul_type = EMULTYPE_TRAP_UD; 7341 char sig[5]; /* ud2; .ascii "kvm" */ 7342 struct x86_exception e; 7343 7344 if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0))) 7345 return 1; 7346 7347 if (fep_flags && 7348 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), 7349 sig, sizeof(sig), &e) == 0 && 7350 memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) { 7351 if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF) 7352 kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF); 7353 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); 7354 emul_type = EMULTYPE_TRAP_UD_FORCED; 7355 } 7356 7357 return kvm_emulate_instruction(vcpu, emul_type); 7358 } 7359 EXPORT_SYMBOL_GPL(handle_ud); 7360 7361 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7362 gpa_t gpa, bool write) 7363 { 7364 /* For APIC access vmexit */ 7365 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7366 return 1; 7367 7368 if (vcpu_match_mmio_gpa(vcpu, gpa)) { 7369 trace_vcpu_match_mmio(gva, gpa, write, true); 7370 return 1; 7371 } 7372 7373 return 0; 7374 } 7375 7376 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7377 gpa_t *gpa, struct x86_exception *exception, 7378 bool write) 7379 { 7380 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7381 u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) 7382 | (write ? PFERR_WRITE_MASK : 0); 7383 7384 /* 7385 * currently PKRU is only applied to ept enabled guest so 7386 * there is no pkey in EPT page table for L1 guest or EPT 7387 * shadow page table for L2 guest. 
7388 */ 7389 if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) || 7390 !permission_fault(vcpu, vcpu->arch.walk_mmu, 7391 vcpu->arch.mmio_access, 0, access))) { 7392 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | 7393 (gva & (PAGE_SIZE - 1)); 7394 trace_vcpu_match_mmio(gva, *gpa, write, false); 7395 return 1; 7396 } 7397 7398 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7399 7400 if (*gpa == INVALID_GPA) 7401 return -1; 7402 7403 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); 7404 } 7405 7406 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 7407 const void *val, int bytes) 7408 { 7409 int ret; 7410 7411 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); 7412 if (ret < 0) 7413 return 0; 7414 kvm_page_track_write(vcpu, gpa, val, bytes); 7415 return 1; 7416 } 7417 7418 struct read_write_emulator_ops { 7419 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, 7420 int bytes); 7421 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, 7422 void *val, int bytes); 7423 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 7424 int bytes, void *val); 7425 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 7426 void *val, int bytes); 7427 bool write; 7428 }; 7429 7430 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) 7431 { 7432 if (vcpu->mmio_read_completed) { 7433 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 7434 vcpu->mmio_fragments[0].gpa, val); 7435 vcpu->mmio_read_completed = 0; 7436 return 1; 7437 } 7438 7439 return 0; 7440 } 7441 7442 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 7443 void *val, int bytes) 7444 { 7445 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); 7446 } 7447 7448 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 7449 void *val, int bytes) 7450 { 7451 return emulator_write_phys(vcpu, gpa, val, bytes); 7452 } 7453 7454 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) 7455 { 7456 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val); 7457 return vcpu_mmio_write(vcpu, gpa, bytes, val); 7458 } 7459 7460 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 7461 void *val, int bytes) 7462 { 7463 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL); 7464 return X86EMUL_IO_NEEDED; 7465 } 7466 7467 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 7468 void *val, int bytes) 7469 { 7470 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 7471 7472 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 7473 return X86EMUL_CONTINUE; 7474 } 7475 7476 static const struct read_write_emulator_ops read_emultor = { 7477 .read_write_prepare = read_prepare, 7478 .read_write_emulate = read_emulate, 7479 .read_write_mmio = vcpu_mmio_read, 7480 .read_write_exit_mmio = read_exit_mmio, 7481 }; 7482 7483 static const struct read_write_emulator_ops write_emultor = { 7484 .read_write_emulate = write_emulate, 7485 .read_write_mmio = write_mmio, 7486 .read_write_exit_mmio = write_exit_mmio, 7487 .write = true, 7488 }; 7489 7490 static int emulator_read_write_onepage(unsigned long addr, void *val, 7491 unsigned int bytes, 7492 struct x86_exception *exception, 7493 struct kvm_vcpu *vcpu, 7494 const struct read_write_emulator_ops *ops) 7495 { 7496 gpa_t gpa; 7497 int handled, ret; 7498 bool write = ops->write; 7499 struct kvm_mmio_fragment *frag; 7500 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 7501 7502 /* 7503 * If the exit was due to a NPF we may already have a GPA. 
* If the GPA is present, use it to avoid the GVA to GPA table walk. 7505 * Note, this cannot be used on string operations since a string 7506 * operation using rep will only have the initial GPA from when the NPF 7507 * occurred. 7508 */ 7509 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && 7510 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { 7511 gpa = ctxt->gpa_val; 7512 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); 7513 } else { 7514 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); 7515 if (ret < 0) 7516 return X86EMUL_PROPAGATE_FAULT; 7517 } 7518 7519 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) 7520 return X86EMUL_CONTINUE; 7521 7522 /* 7523 * Is this MMIO handled locally? 7524 */ 7525 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); 7526 if (handled == bytes) 7527 return X86EMUL_CONTINUE; 7528 7529 gpa += handled; 7530 bytes -= handled; 7531 val += handled; 7532 7533 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); 7534 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 7535 frag->gpa = gpa; 7536 frag->data = val; 7537 frag->len = bytes; 7538 return X86EMUL_CONTINUE; 7539 } 7540 7541 static int emulator_read_write(struct x86_emulate_ctxt *ctxt, 7542 unsigned long addr, 7543 void *val, unsigned int bytes, 7544 struct x86_exception *exception, 7545 const struct read_write_emulator_ops *ops) 7546 { 7547 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7548 gpa_t gpa; 7549 int rc; 7550 7551 if (ops->read_write_prepare && 7552 ops->read_write_prepare(vcpu, val, bytes)) 7553 return X86EMUL_CONTINUE; 7554 7555 vcpu->mmio_nr_fragments = 0; 7556 7557 /* Crossing a page boundary? */ 7558 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 7559 int now; 7560 7561 now = -addr & ~PAGE_MASK; 7562 rc = emulator_read_write_onepage(addr, val, now, exception, 7563 vcpu, ops); 7564 7565 if (rc != X86EMUL_CONTINUE) 7566 return rc; 7567 addr += now; 7568 if (ctxt->mode != X86EMUL_MODE_PROT64) 7569 addr = (u32)addr; 7570 val += now; 7571 bytes -= now; 7572 } 7573 7574 rc = emulator_read_write_onepage(addr, val, bytes, exception, 7575 vcpu, ops); 7576 if (rc != X86EMUL_CONTINUE) 7577 return rc; 7578 7579 if (!vcpu->mmio_nr_fragments) 7580 return rc; 7581 7582 gpa = vcpu->mmio_fragments[0].gpa; 7583 7584 vcpu->mmio_needed = 1; 7585 vcpu->mmio_cur_fragment = 0; 7586 7587 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); 7588 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 7589 vcpu->run->exit_reason = KVM_EXIT_MMIO; 7590 vcpu->run->mmio.phys_addr = gpa; 7591 7592 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 7593 } 7594 7595 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 7596 unsigned long addr, 7597 void *val, 7598 unsigned int bytes, 7599 struct x86_exception *exception) 7600 { 7601 return emulator_read_write(ctxt, addr, val, bytes, 7602 exception, &read_emultor); 7603 } 7604 7605 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, 7606 unsigned long addr, 7607 const void *val, 7608 unsigned int bytes, 7609 struct x86_exception *exception) 7610 { 7611 return emulator_read_write(ctxt, addr, (void *)val, bytes, 7612 exception, &write_emultor); 7613 } 7614 7615 #define emulator_try_cmpxchg_user(t, ptr, old, new) \ 7616 (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t)) 7617 7618 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, 7619 unsigned long addr, 7620 const void *old, 7621 const void *new, 7622 unsigned int bytes, 7623 struct
x86_exception *exception) 7624 { 7625 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7626 u64 page_line_mask; 7627 unsigned long hva; 7628 gpa_t gpa; 7629 int r; 7630 7631 /* guests cmpxchg8b have to be emulated atomically */ 7632 if (bytes > 8 || (bytes & (bytes - 1))) 7633 goto emul_write; 7634 7635 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); 7636 7637 if (gpa == INVALID_GPA || 7638 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7639 goto emul_write; 7640 7641 /* 7642 * Emulate the atomic as a straight write to avoid #AC if SLD is 7643 * enabled in the host and the access splits a cache line. 7644 */ 7645 if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) 7646 page_line_mask = ~(cache_line_size() - 1); 7647 else 7648 page_line_mask = PAGE_MASK; 7649 7650 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) 7651 goto emul_write; 7652 7653 hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa)); 7654 if (kvm_is_error_hva(hva)) 7655 goto emul_write; 7656 7657 hva += offset_in_page(gpa); 7658 7659 switch (bytes) { 7660 case 1: 7661 r = emulator_try_cmpxchg_user(u8, hva, old, new); 7662 break; 7663 case 2: 7664 r = emulator_try_cmpxchg_user(u16, hva, old, new); 7665 break; 7666 case 4: 7667 r = emulator_try_cmpxchg_user(u32, hva, old, new); 7668 break; 7669 case 8: 7670 r = emulator_try_cmpxchg_user(u64, hva, old, new); 7671 break; 7672 default: 7673 BUG(); 7674 } 7675 7676 if (r < 0) 7677 return X86EMUL_UNHANDLEABLE; 7678 if (r) 7679 return X86EMUL_CMPXCHG_FAILED; 7680 7681 kvm_page_track_write(vcpu, gpa, new, bytes); 7682 7683 return X86EMUL_CONTINUE; 7684 7685 emul_write: 7686 printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); 7687 7688 return emulator_write_emulated(ctxt, addr, new, bytes, exception); 7689 } 7690 7691 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, 7692 unsigned short port, void *data, 7693 unsigned int count, bool in) 7694 { 7695 unsigned i; 7696 int r; 7697 7698 WARN_ON_ONCE(vcpu->arch.pio.count); 7699 for (i = 0; i < count; i++) { 7700 if (in) 7701 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data); 7702 else 7703 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data); 7704 7705 if (r) { 7706 if (i == 0) 7707 goto userspace_io; 7708 7709 /* 7710 * Userspace must have unregistered the device while PIO 7711 * was running. Drop writes / read as 0. 7712 */ 7713 if (in) 7714 memset(data, 0, size * (count - i)); 7715 break; 7716 } 7717 7718 data += size; 7719 } 7720 return 1; 7721 7722 userspace_io: 7723 vcpu->arch.pio.port = port; 7724 vcpu->arch.pio.in = in; 7725 vcpu->arch.pio.count = count; 7726 vcpu->arch.pio.size = size; 7727 7728 if (in) 7729 memset(vcpu->arch.pio_data, 0, size * count); 7730 else 7731 memcpy(vcpu->arch.pio_data, data, size * count); 7732 7733 vcpu->run->exit_reason = KVM_EXIT_IO; 7734 vcpu->run->io.direction = in ? 
KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 7735 vcpu->run->io.size = size; 7736 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 7737 vcpu->run->io.count = count; 7738 vcpu->run->io.port = port; 7739 return 0; 7740 } 7741 7742 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, 7743 unsigned short port, void *val, unsigned int count) 7744 { 7745 int r = emulator_pio_in_out(vcpu, size, port, val, count, true); 7746 if (r) 7747 trace_kvm_pio(KVM_PIO_IN, port, size, count, val); 7748 7749 return r; 7750 } 7751 7752 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val) 7753 { 7754 int size = vcpu->arch.pio.size; 7755 unsigned int count = vcpu->arch.pio.count; 7756 memcpy(val, vcpu->arch.pio_data, size * count); 7757 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); 7758 vcpu->arch.pio.count = 0; 7759 } 7760 7761 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, 7762 int size, unsigned short port, void *val, 7763 unsigned int count) 7764 { 7765 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7766 if (vcpu->arch.pio.count) { 7767 /* 7768 * Complete a previous iteration that required userspace I/O. 7769 * Note, @count isn't guaranteed to match pio.count as userspace 7770 * can modify ECX before rerunning the vCPU. Ignore any such 7771 * shenanigans as KVM doesn't support modifying the rep count, 7772 * and the emulator ensures @count doesn't overflow the buffer. 7773 */ 7774 complete_emulator_pio_in(vcpu, val); 7775 return 1; 7776 } 7777 7778 return emulator_pio_in(vcpu, size, port, val, count); 7779 } 7780 7781 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size, 7782 unsigned short port, const void *val, 7783 unsigned int count) 7784 { 7785 trace_kvm_pio(KVM_PIO_OUT, port, size, count, val); 7786 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); 7787 } 7788 7789 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, 7790 int size, unsigned short port, 7791 const void *val, unsigned int count) 7792 { 7793 return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count); 7794 } 7795 7796 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 7797 { 7798 return static_call(kvm_x86_get_segment_base)(vcpu, seg); 7799 } 7800 7801 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) 7802 { 7803 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); 7804 } 7805 7806 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) 7807 { 7808 if (!need_emulate_wbinvd(vcpu)) 7809 return X86EMUL_CONTINUE; 7810 7811 if (static_call(kvm_x86_has_wbinvd_exit)()) { 7812 int cpu = get_cpu(); 7813 7814 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 7815 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, 7816 wbinvd_ipi, NULL, 1); 7817 put_cpu(); 7818 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 7819 } else 7820 wbinvd(); 7821 return X86EMUL_CONTINUE; 7822 } 7823 7824 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) 7825 { 7826 kvm_emulate_wbinvd_noskip(vcpu); 7827 return kvm_skip_emulated_instruction(vcpu); 7828 } 7829 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); 7830 7831 7832 7833 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) 7834 { 7835 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); 7836 } 7837 7838 static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, 7839 unsigned long *dest) 7840 { 7841 kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); 7842 } 7843 7844 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, 7845 unsigned long value) 7846 { 7847 
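	/*
	 * kvm_set_dr() returns 0 on success and non-zero if the value is
	 * invalid for the target debug register; the emulator treats a
	 * non-zero return as a failed, faulting write.
	 */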
7848 return kvm_set_dr(emul_to_vcpu(ctxt), dr, value); 7849 } 7850 7851 static u64 mk_cr_64(u64 curr_cr, u32 new_val) 7852 { 7853 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; 7854 } 7855 7856 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) 7857 { 7858 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7859 unsigned long value; 7860 7861 switch (cr) { 7862 case 0: 7863 value = kvm_read_cr0(vcpu); 7864 break; 7865 case 2: 7866 value = vcpu->arch.cr2; 7867 break; 7868 case 3: 7869 value = kvm_read_cr3(vcpu); 7870 break; 7871 case 4: 7872 value = kvm_read_cr4(vcpu); 7873 break; 7874 case 8: 7875 value = kvm_get_cr8(vcpu); 7876 break; 7877 default: 7878 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7879 return 0; 7880 } 7881 7882 return value; 7883 } 7884 7885 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) 7886 { 7887 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7888 int res = 0; 7889 7890 switch (cr) { 7891 case 0: 7892 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); 7893 break; 7894 case 2: 7895 vcpu->arch.cr2 = val; 7896 break; 7897 case 3: 7898 res = kvm_set_cr3(vcpu, val); 7899 break; 7900 case 4: 7901 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); 7902 break; 7903 case 8: 7904 res = kvm_set_cr8(vcpu, val); 7905 break; 7906 default: 7907 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7908 res = -1; 7909 } 7910 7911 return res; 7912 } 7913 7914 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) 7915 { 7916 return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt)); 7917 } 7918 7919 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7920 { 7921 static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt); 7922 } 7923 7924 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7925 { 7926 static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt); 7927 } 7928 7929 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7930 { 7931 static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt); 7932 } 7933 7934 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7935 { 7936 static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt); 7937 } 7938 7939 static unsigned long emulator_get_cached_segment_base( 7940 struct x86_emulate_ctxt *ctxt, int seg) 7941 { 7942 return get_segment_base(emul_to_vcpu(ctxt), seg); 7943 } 7944 7945 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, 7946 struct desc_struct *desc, u32 *base3, 7947 int seg) 7948 { 7949 struct kvm_segment var; 7950 7951 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); 7952 *selector = var.selector; 7953 7954 if (var.unusable) { 7955 memset(desc, 0, sizeof(*desc)); 7956 if (base3) 7957 *base3 = 0; 7958 return false; 7959 } 7960 7961 if (var.g) 7962 var.limit >>= 12; 7963 set_desc_limit(desc, var.limit); 7964 set_desc_base(desc, (unsigned long)var.base); 7965 #ifdef CONFIG_X86_64 7966 if (base3) 7967 *base3 = var.base >> 32; 7968 #endif 7969 desc->type = var.type; 7970 desc->s = var.s; 7971 desc->dpl = var.dpl; 7972 desc->p = var.present; 7973 desc->avl = var.avl; 7974 desc->l = var.l; 7975 desc->d = var.db; 7976 desc->g = var.g; 7977 7978 return true; 7979 } 7980 7981 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, 7982 struct desc_struct *desc, u32 base3, 7983 int seg) 7984 { 7985 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7986 struct kvm_segment var; 7987 7988 var.selector = selector; 7989 var.base = 
get_desc_base(desc); 7990 #ifdef CONFIG_X86_64 7991 var.base |= ((u64)base3) << 32; 7992 #endif 7993 var.limit = get_desc_limit(desc); 7994 if (desc->g) 7995 var.limit = (var.limit << 12) | 0xfff; 7996 var.type = desc->type; 7997 var.dpl = desc->dpl; 7998 var.db = desc->d; 7999 var.s = desc->s; 8000 var.l = desc->l; 8001 var.g = desc->g; 8002 var.avl = desc->avl; 8003 var.present = desc->p; 8004 var.unusable = !var.present; 8005 var.padding = 0; 8006 8007 kvm_set_segment(vcpu, &var, seg); 8008 return; 8009 } 8010 8011 static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt, 8012 u32 msr_index, u64 *pdata) 8013 { 8014 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8015 int r; 8016 8017 r = kvm_get_msr_with_filter(vcpu, msr_index, pdata); 8018 if (r < 0) 8019 return X86EMUL_UNHANDLEABLE; 8020 8021 if (r) { 8022 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, 8023 complete_emulated_rdmsr, r)) 8024 return X86EMUL_IO_NEEDED; 8025 8026 trace_kvm_msr_read_ex(msr_index); 8027 return X86EMUL_PROPAGATE_FAULT; 8028 } 8029 8030 trace_kvm_msr_read(msr_index, *pdata); 8031 return X86EMUL_CONTINUE; 8032 } 8033 8034 static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt, 8035 u32 msr_index, u64 data) 8036 { 8037 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8038 int r; 8039 8040 r = kvm_set_msr_with_filter(vcpu, msr_index, data); 8041 if (r < 0) 8042 return X86EMUL_UNHANDLEABLE; 8043 8044 if (r) { 8045 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data, 8046 complete_emulated_msr_access, r)) 8047 return X86EMUL_IO_NEEDED; 8048 8049 trace_kvm_msr_write_ex(msr_index, data); 8050 return X86EMUL_PROPAGATE_FAULT; 8051 } 8052 8053 trace_kvm_msr_write(msr_index, data); 8054 return X86EMUL_CONTINUE; 8055 } 8056 8057 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, 8058 u32 msr_index, u64 *pdata) 8059 { 8060 return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); 8061 } 8062 8063 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, 8064 u32 pmc) 8065 { 8066 if (kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc)) 8067 return 0; 8068 return -EINVAL; 8069 } 8070 8071 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, 8072 u32 pmc, u64 *pdata) 8073 { 8074 return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata); 8075 } 8076 8077 static void emulator_halt(struct x86_emulate_ctxt *ctxt) 8078 { 8079 emul_to_vcpu(ctxt)->arch.halt_request = 1; 8080 } 8081 8082 static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 8083 struct x86_instruction_info *info, 8084 enum x86_intercept_stage stage) 8085 { 8086 return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage, 8087 &ctxt->exception); 8088 } 8089 8090 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, 8091 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, 8092 bool exact_only) 8093 { 8094 return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only); 8095 } 8096 8097 static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt) 8098 { 8099 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM); 8100 } 8101 8102 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt) 8103 { 8104 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE); 8105 } 8106 8107 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) 8108 { 8109 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR); 8110 } 8111 8112 static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt) 8113 { 8114 return guest_cpuid_has(emul_to_vcpu(ctxt), 
X86_FEATURE_RDPID); 8115 } 8116 8117 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) 8118 { 8119 return kvm_register_read_raw(emul_to_vcpu(ctxt), reg); 8120 } 8121 8122 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) 8123 { 8124 kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val); 8125 } 8126 8127 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) 8128 { 8129 static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked); 8130 } 8131 8132 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) 8133 { 8134 return emul_to_vcpu(ctxt)->arch.hflags; 8135 } 8136 8137 #ifndef CONFIG_KVM_SMM 8138 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt) 8139 { 8140 WARN_ON_ONCE(1); 8141 return X86EMUL_UNHANDLEABLE; 8142 } 8143 #endif 8144 8145 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt) 8146 { 8147 kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt)); 8148 } 8149 8150 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr) 8151 { 8152 return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr); 8153 } 8154 8155 static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt) 8156 { 8157 struct kvm *kvm = emul_to_vcpu(ctxt)->kvm; 8158 8159 if (!kvm->vm_bugged) 8160 kvm_vm_bugged(kvm); 8161 } 8162 8163 static const struct x86_emulate_ops emulate_ops = { 8164 .vm_bugged = emulator_vm_bugged, 8165 .read_gpr = emulator_read_gpr, 8166 .write_gpr = emulator_write_gpr, 8167 .read_std = emulator_read_std, 8168 .write_std = emulator_write_std, 8169 .fetch = kvm_fetch_guest_virt, 8170 .read_emulated = emulator_read_emulated, 8171 .write_emulated = emulator_write_emulated, 8172 .cmpxchg_emulated = emulator_cmpxchg_emulated, 8173 .invlpg = emulator_invlpg, 8174 .pio_in_emulated = emulator_pio_in_emulated, 8175 .pio_out_emulated = emulator_pio_out_emulated, 8176 .get_segment = emulator_get_segment, 8177 .set_segment = emulator_set_segment, 8178 .get_cached_segment_base = emulator_get_cached_segment_base, 8179 .get_gdt = emulator_get_gdt, 8180 .get_idt = emulator_get_idt, 8181 .set_gdt = emulator_set_gdt, 8182 .set_idt = emulator_set_idt, 8183 .get_cr = emulator_get_cr, 8184 .set_cr = emulator_set_cr, 8185 .cpl = emulator_get_cpl, 8186 .get_dr = emulator_get_dr, 8187 .set_dr = emulator_set_dr, 8188 .set_msr_with_filter = emulator_set_msr_with_filter, 8189 .get_msr_with_filter = emulator_get_msr_with_filter, 8190 .get_msr = emulator_get_msr, 8191 .check_pmc = emulator_check_pmc, 8192 .read_pmc = emulator_read_pmc, 8193 .halt = emulator_halt, 8194 .wbinvd = emulator_wbinvd, 8195 .fix_hypercall = emulator_fix_hypercall, 8196 .intercept = emulator_intercept, 8197 .get_cpuid = emulator_get_cpuid, 8198 .guest_has_long_mode = emulator_guest_has_long_mode, 8199 .guest_has_movbe = emulator_guest_has_movbe, 8200 .guest_has_fxsr = emulator_guest_has_fxsr, 8201 .guest_has_rdpid = emulator_guest_has_rdpid, 8202 .set_nmi_mask = emulator_set_nmi_mask, 8203 .get_hflags = emulator_get_hflags, 8204 .leave_smm = emulator_leave_smm, 8205 .triple_fault = emulator_triple_fault, 8206 .set_xcr = emulator_set_xcr, 8207 }; 8208 8209 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) 8210 { 8211 u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 8212 /* 8213 * an sti; sti; sequence only disables interrupts for the first 8214 * instruction.
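For example, starting with IF=0, 'sti; sti; nop' arms the shadow only * at the first STI; the second STI executes inside the shadow and must * not re-arm it, so an interrupt can be delivered at the nop.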
So, if the last instruction, be it emulated or 8215 * not, left the system with the INT_STI flag enabled, it 8216 * means that the last instruction is an sti. We should not 8217 * leave the flag on in this case. The same goes for mov ss 8218 */ 8219 if (int_shadow & mask) 8220 mask = 0; 8221 if (unlikely(int_shadow || mask)) { 8222 static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); 8223 if (!mask) 8224 kvm_make_request(KVM_REQ_EVENT, vcpu); 8225 } 8226 } 8227 8228 static void inject_emulated_exception(struct kvm_vcpu *vcpu) 8229 { 8230 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8231 8232 if (ctxt->exception.vector == PF_VECTOR) 8233 kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); 8234 else if (ctxt->exception.error_code_valid) 8235 kvm_queue_exception_e(vcpu, ctxt->exception.vector, 8236 ctxt->exception.error_code); 8237 else 8238 kvm_queue_exception(vcpu, ctxt->exception.vector); 8239 } 8240 8241 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu) 8242 { 8243 struct x86_emulate_ctxt *ctxt; 8244 8245 ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT); 8246 if (!ctxt) { 8247 pr_err("kvm: failed to allocate vcpu's emulator\n"); 8248 return NULL; 8249 } 8250 8251 ctxt->vcpu = vcpu; 8252 ctxt->ops = &emulate_ops; 8253 vcpu->arch.emulate_ctxt = ctxt; 8254 8255 return ctxt; 8256 } 8257 8258 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) 8259 { 8260 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8261 int cs_db, cs_l; 8262 8263 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); 8264 8265 ctxt->gpa_available = false; 8266 ctxt->eflags = kvm_get_rflags(vcpu); 8267 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; 8268 8269 ctxt->eip = kvm_rip_read(vcpu); 8270 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 8271 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : 8272 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : 8273 cs_db ? X86EMUL_MODE_PROT32 : 8274 X86EMUL_MODE_PROT16; 8275 BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); 8276 8277 ctxt->interruptibility = 0; 8278 ctxt->have_exception = false; 8279 ctxt->exception.vector = -1; 8280 ctxt->perm_ok = false; 8281 8282 init_decode_cache(ctxt); 8283 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 8284 } 8285 8286 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) 8287 { 8288 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8289 int ret; 8290 8291 init_emulate_ctxt(vcpu); 8292 8293 ctxt->op_bytes = 2; 8294 ctxt->ad_bytes = 2; 8295 ctxt->_eip = ctxt->eip + inc_eip; 8296 ret = emulate_int_real(ctxt, irq); 8297 8298 if (ret != X86EMUL_CONTINUE) { 8299 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 8300 } else { 8301 ctxt->eip = ctxt->_eip; 8302 kvm_rip_write(vcpu, ctxt->eip); 8303 kvm_set_rflags(vcpu, ctxt->eflags); 8304 } 8305 } 8306 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); 8307 8308 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, 8309 u8 ndata, u8 *insn_bytes, u8 insn_size) 8310 { 8311 struct kvm_run *run = vcpu->run; 8312 u64 info[5]; 8313 u8 info_start; 8314 8315 /* 8316 * Zero the whole array used to retrieve the exit info, as casting to 8317 * u32 for select entries will leave some chunks uninitialized. 
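E.g. info[0] below is filled through a (u32 *) cast, so only its low * 32 bits are written; the memset supplies the zeroed remainder.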
8318 */ 8319 memset(&info, 0, sizeof(info)); 8320 8321 static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1], 8322 &info[2], (u32 *)&info[3], 8323 (u32 *)&info[4]); 8324 8325 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 8326 run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION; 8327 8328 /* 8329 * There's currently space for 13 entries, but 5 are used for the exit 8330 * reason and info. Restrict to 4 to reduce the maintenance burden 8331 * when expanding kvm_run.emulation_failure in the future. 8332 */ 8333 if (WARN_ON_ONCE(ndata > 4)) 8334 ndata = 4; 8335 8336 /* Always include the flags as a 'data' entry. */ 8337 info_start = 1; 8338 run->emulation_failure.flags = 0; 8339 8340 if (insn_size) { 8341 BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) + 8342 sizeof(run->emulation_failure.insn_bytes) != 16)); 8343 info_start += 2; 8344 run->emulation_failure.flags |= 8345 KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES; 8346 run->emulation_failure.insn_size = insn_size; 8347 memset(run->emulation_failure.insn_bytes, 0x90, 8348 sizeof(run->emulation_failure.insn_bytes)); 8349 memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size); 8350 } 8351 8352 memcpy(&run->internal.data[info_start], info, sizeof(info)); 8353 memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data, 8354 ndata * sizeof(data[0])); 8355 8356 run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata; 8357 } 8358 8359 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu) 8360 { 8361 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8362 8363 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, 8364 ctxt->fetch.end - ctxt->fetch.data); 8365 } 8366 8367 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, 8368 u8 ndata) 8369 { 8370 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0); 8371 } 8372 EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit); 8373 8374 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) 8375 { 8376 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0); 8377 } 8378 EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit); 8379 8380 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) 8381 { 8382 struct kvm *kvm = vcpu->kvm; 8383 8384 ++vcpu->stat.insn_emulation_fail; 8385 trace_kvm_emulate_insn_failed(vcpu); 8386 8387 if (emulation_type & EMULTYPE_VMWARE_GP) { 8388 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 8389 return 1; 8390 } 8391 8392 if (kvm->arch.exit_on_emulation_error || 8393 (emulation_type & EMULTYPE_SKIP)) { 8394 prepare_emulation_ctxt_failure_exit(vcpu); 8395 return 0; 8396 } 8397 8398 kvm_queue_exception(vcpu, UD_VECTOR); 8399 8400 if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { 8401 prepare_emulation_ctxt_failure_exit(vcpu); 8402 return 0; 8403 } 8404 8405 return 1; 8406 } 8407 8408 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 8409 bool write_fault_to_shadow_pgtable, 8410 int emulation_type) 8411 { 8412 gpa_t gpa = cr2_or_gpa; 8413 kvm_pfn_t pfn; 8414 8415 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 8416 return false; 8417 8418 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 8419 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 8420 return false; 8421 8422 if (!vcpu->arch.mmu->root_role.direct) { 8423 /* 8424 * Write permission should be allowed since only 8425 * write access needs to be emulated.
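(Hence the kvm_mmu_gva_to_gpa_write() translation below rather than * the read variant.)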
8426 */ 8427 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 8428 8429 /* 8430 * If the mapping is invalid in the guest, let the CPU retry 8431 * it to generate the fault. 8432 */ 8433 if (gpa == INVALID_GPA) 8434 return true; 8435 } 8436 8437 /* 8438 * Do not retry the unhandleable instruction if it faults on the 8439 * readonly host memory, otherwise it will go into an infinite loop: 8440 * retry instruction -> write #PF -> emulation fail -> retry 8441 * instruction -> ... 8442 */ 8443 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); 8444 8445 /* 8446 * If the instruction failed on the error pfn, it cannot be fixed; 8447 * report the error to userspace. 8448 */ 8449 if (is_error_noslot_pfn(pfn)) 8450 return false; 8451 8452 kvm_release_pfn_clean(pfn); 8453 8454 /* The instructions are well-emulated on direct mmu. */ 8455 if (vcpu->arch.mmu->root_role.direct) { 8456 unsigned int indirect_shadow_pages; 8457 8458 write_lock(&vcpu->kvm->mmu_lock); 8459 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; 8460 write_unlock(&vcpu->kvm->mmu_lock); 8461 8462 if (indirect_shadow_pages) 8463 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8464 8465 return true; 8466 } 8467 8468 /* 8469 * If emulation was due to an access to a shadowed page table 8470 * and it failed, try to unshadow the page and re-enter the 8471 * guest to let the CPU execute the instruction. 8472 */ 8473 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8474 8475 /* 8476 * If the access faults on its page table, it cannot 8477 * be fixed by unprotecting the shadow page; it should 8478 * be reported to userspace. 8479 */ 8480 return !write_fault_to_shadow_pgtable; 8481 } 8482 8483 static bool retry_instruction(struct x86_emulate_ctxt *ctxt, 8484 gpa_t cr2_or_gpa, int emulation_type) 8485 { 8486 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8487 unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa; 8488 8489 last_retry_eip = vcpu->arch.last_retry_eip; 8490 last_retry_addr = vcpu->arch.last_retry_addr; 8491 8492 /* 8493 * If the emulation is caused by #PF and the instruction is not a 8494 * page-table-writing instruction, the VM-exit was caused by shadow 8495 * page protection; we can zap the shadow page and retry this 8496 * instruction directly. 8497 * 8498 * Note: if the guest uses a non-page-table modifying instruction 8499 * on the PDE that points to the instruction, then we will unmap 8500 * the instruction and go to an infinite loop. So, we cache the 8501 * last retried eip and the last fault address; if we hit the same 8502 * eip and address again, we can break out of the potential infinite 8503 * loop.
8504 */ 8505 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 8506 8507 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 8508 return false; 8509 8510 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 8511 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 8512 return false; 8513 8514 if (x86_page_table_writing_insn(ctxt)) 8515 return false; 8516 8517 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) 8518 return false; 8519 8520 vcpu->arch.last_retry_eip = ctxt->eip; 8521 vcpu->arch.last_retry_addr = cr2_or_gpa; 8522 8523 if (!vcpu->arch.mmu->root_role.direct) 8524 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 8525 8526 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8527 8528 return true; 8529 } 8530 8531 static int complete_emulated_mmio(struct kvm_vcpu *vcpu); 8532 static int complete_emulated_pio(struct kvm_vcpu *vcpu); 8533 8534 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, 8535 unsigned long *db) 8536 { 8537 u32 dr6 = 0; 8538 int i; 8539 u32 enable, rwlen; 8540 8541 enable = dr7; 8542 rwlen = dr7 >> 16; 8543 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) 8544 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) 8545 dr6 |= (1 << i); 8546 return dr6; 8547 } 8548 8549 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) 8550 { 8551 struct kvm_run *kvm_run = vcpu->run; 8552 8553 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 8554 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; 8555 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); 8556 kvm_run->debug.arch.exception = DB_VECTOR; 8557 kvm_run->exit_reason = KVM_EXIT_DEBUG; 8558 return 0; 8559 } 8560 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); 8561 return 1; 8562 } 8563 8564 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) 8565 { 8566 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 8567 int r; 8568 8569 r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); 8570 if (unlikely(!r)) 8571 return 0; 8572 8573 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 8574 8575 /* 8576 * rflags is the old, "raw" value of the flags. The new value has 8577 * not been saved yet. 8578 * 8579 * This is correct even for TF set by the guest, because "the 8580 * processor will not generate this exception after the instruction 8581 * that sets the TF flag". 8582 */ 8583 if (unlikely(rflags & X86_EFLAGS_TF)) 8584 r = kvm_vcpu_do_singlestep(vcpu); 8585 return r; 8586 } 8587 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); 8588 8589 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu) 8590 { 8591 u32 shadow; 8592 8593 if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF) 8594 return true; 8595 8596 /* 8597 * Intel CPUs inhibit code #DBs when MOV/POP SS blocking is active, 8598 * but AMD CPUs do not. MOV/POP SS blocking is rare, check that first 8599 * to avoid the relatively expensive CPUID lookup. 8600 */ 8601 shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 8602 return (shadow & KVM_X86_SHADOW_INT_MOV_SS) && 8603 guest_cpuid_is_intel(vcpu); 8604 } 8605 8606 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, 8607 int emulation_type, int *r) 8608 { 8609 WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE); 8610 8611 /* 8612 * Do not check for code breakpoints if hardware has already done the 8613 * checks, as inferred from the emulation type. On NO_DECODE and SKIP, 8614 * the instruction has passed all exception checks, and all intercepted 8615 * exceptions that trigger emulation have lower priority than code 8616 * breakpoints, i.e. 
the fact that the intercepted exception occurred 8617 * means any code breakpoints have already been serviced. 8618 * 8619 * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as 8620 * hardware has checked the RIP of the magic prefix, but not the RIP of 8621 * the instruction being emulated. The intent of forced emulation is 8622 * to behave as if KVM intercepted the instruction without an exception 8623 * and without a prefix. 8624 */ 8625 if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP | 8626 EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF)) 8627 return false; 8628 8629 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && 8630 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { 8631 struct kvm_run *kvm_run = vcpu->run; 8632 unsigned long eip = kvm_get_linear_rip(vcpu); 8633 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 8634 vcpu->arch.guest_debug_dr7, 8635 vcpu->arch.eff_db); 8636 8637 if (dr6 != 0) { 8638 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; 8639 kvm_run->debug.arch.pc = eip; 8640 kvm_run->debug.arch.exception = DB_VECTOR; 8641 kvm_run->exit_reason = KVM_EXIT_DEBUG; 8642 *r = 0; 8643 return true; 8644 } 8645 } 8646 8647 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && 8648 !kvm_is_code_breakpoint_inhibited(vcpu)) { 8649 unsigned long eip = kvm_get_linear_rip(vcpu); 8650 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 8651 vcpu->arch.dr7, 8652 vcpu->arch.db); 8653 8654 if (dr6 != 0) { 8655 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); 8656 *r = 1; 8657 return true; 8658 } 8659 } 8660 8661 return false; 8662 } 8663 8664 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) 8665 { 8666 switch (ctxt->opcode_len) { 8667 case 1: 8668 switch (ctxt->b) { 8669 case 0xe4: /* IN */ 8670 case 0xe5: 8671 case 0xec: 8672 case 0xed: 8673 case 0xe6: /* OUT */ 8674 case 0xe7: 8675 case 0xee: 8676 case 0xef: 8677 case 0x6c: /* INS */ 8678 case 0x6d: 8679 case 0x6e: /* OUTS */ 8680 case 0x6f: 8681 return true; 8682 } 8683 break; 8684 case 2: 8685 switch (ctxt->b) { 8686 case 0x33: /* RDPMC */ 8687 return true; 8688 } 8689 break; 8690 } 8691 8692 return false; 8693 } 8694 8695 /* 8696 * Decode an instruction for emulation. The caller is responsible for handling 8697 * code breakpoints. Note, manually detecting code breakpoints is unnecessary 8698 * (and wrong) when emulating on an intercepted fault-like exception[*], as 8699 * code breakpoints have higher priority and thus have already been done by 8700 * hardware. 8701 * 8702 * [*] Except #MC, which is higher priority, but KVM should never emulate in 8703 * response to a machine check. 
8704 */ 8705 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, 8706 void *insn, int insn_len) 8707 { 8708 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8709 int r; 8710 8711 init_emulate_ctxt(vcpu); 8712 8713 r = x86_decode_insn(ctxt, insn, insn_len, emulation_type); 8714 8715 trace_kvm_emulate_insn_start(vcpu); 8716 ++vcpu->stat.insn_emulation; 8717 8718 return r; 8719 } 8720 EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction); 8721 8722 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 8723 int emulation_type, void *insn, int insn_len) 8724 { 8725 int r; 8726 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8727 bool writeback = true; 8728 bool write_fault_to_spt; 8729 8730 if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len))) 8731 return 1; 8732 8733 vcpu->arch.l1tf_flush_l1d = true; 8734 8735 /* 8736 * Clear write_fault_to_shadow_pgtable here to ensure it is 8737 * never reused. 8738 */ 8739 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; 8740 vcpu->arch.write_fault_to_shadow_pgtable = false; 8741 8742 if (!(emulation_type & EMULTYPE_NO_DECODE)) { 8743 kvm_clear_exception_queue(vcpu); 8744 8745 /* 8746 * Return immediately if RIP hits a code breakpoint; such #DBs 8747 * are fault-like and have higher priority than any faults on 8748 * the code fetch itself. 8749 */ 8750 if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r)) 8751 return r; 8752 8753 r = x86_decode_emulated_instruction(vcpu, emulation_type, 8754 insn, insn_len); 8755 if (r != EMULATION_OK) { 8756 if ((emulation_type & EMULTYPE_TRAP_UD) || 8757 (emulation_type & EMULTYPE_TRAP_UD_FORCED)) { 8758 kvm_queue_exception(vcpu, UD_VECTOR); 8759 return 1; 8760 } 8761 if (reexecute_instruction(vcpu, cr2_or_gpa, 8762 write_fault_to_spt, 8763 emulation_type)) 8764 return 1; 8765 if (ctxt->have_exception) { 8766 /* 8767 * A #UD should result in just EMULATION_FAILED, and trap-like 8768 * exceptions should not be encountered during decode. 8769 */ 8770 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || 8771 exception_type(ctxt->exception.vector) == EXCPT_TRAP); 8772 inject_emulated_exception(vcpu); 8773 return 1; 8774 } 8775 return handle_emulation_failure(vcpu, emulation_type); 8776 } 8777 } 8778 8779 if ((emulation_type & EMULTYPE_VMWARE_GP) && 8780 !is_vmware_backdoor_opcode(ctxt)) { 8781 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 8782 return 1; 8783 } 8784 8785 /* 8786 * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for 8787 * use *only* by vendor callbacks for kvm_skip_emulated_instruction(). 8788 * The caller is responsible for updating interruptibility state and 8789 * injecting single-step #DBs.
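E.g. a vendor implementation may take this path to skip an instruction * when hardware does not report the intercepted instruction's length or * next RIP.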
8790 */ 8791 if (emulation_type & EMULTYPE_SKIP) { 8792 if (ctxt->mode != X86EMUL_MODE_PROT64) 8793 ctxt->eip = (u32)ctxt->_eip; 8794 else 8795 ctxt->eip = ctxt->_eip; 8796 8797 if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) { 8798 r = 1; 8799 goto writeback; 8800 } 8801 8802 kvm_rip_write(vcpu, ctxt->eip); 8803 if (ctxt->eflags & X86_EFLAGS_RF) 8804 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); 8805 return 1; 8806 } 8807 8808 if (retry_instruction(ctxt, cr2_or_gpa, emulation_type)) 8809 return 1; 8810 8811 /* This is needed for the VMware backdoor interface to work since it 8812 * changes register values during an IO operation. */ 8813 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { 8814 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 8815 emulator_invalidate_register_cache(ctxt); 8816 } 8817 8818 restart: 8819 if (emulation_type & EMULTYPE_PF) { 8820 /* Save the faulting GPA (cr2) in the address field */ 8821 ctxt->exception.address = cr2_or_gpa; 8822 8823 /* With shadow page tables, cr2 contains a GVA or nGPA. */ 8824 if (vcpu->arch.mmu->root_role.direct) { 8825 ctxt->gpa_available = true; 8826 ctxt->gpa_val = cr2_or_gpa; 8827 } 8828 } else { 8829 /* Sanitize the address out of an abundance of paranoia. */ 8830 ctxt->exception.address = 0; 8831 } 8832 8833 r = x86_emulate_insn(ctxt); 8834 8835 if (r == EMULATION_INTERCEPTED) 8836 return 1; 8837 8838 if (r == EMULATION_FAILED) { 8839 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, 8840 emulation_type)) 8841 return 1; 8842 8843 return handle_emulation_failure(vcpu, emulation_type); 8844 } 8845 8846 if (ctxt->have_exception) { 8847 r = 1; 8848 inject_emulated_exception(vcpu); 8849 } else if (vcpu->arch.pio.count) { 8850 if (!vcpu->arch.pio.in) { 8851 /* FIXME: return into emulator if single-stepping. */ 8852 vcpu->arch.pio.count = 0; 8853 } else { 8854 writeback = false; 8855 vcpu->arch.complete_userspace_io = complete_emulated_pio; 8856 } 8857 r = 0; 8858 } else if (vcpu->mmio_needed) { 8859 ++vcpu->stat.mmio_exits; 8860 8861 if (!vcpu->mmio_is_write) 8862 writeback = false; 8863 r = 0; 8864 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 8865 } else if (vcpu->arch.complete_userspace_io) { 8866 writeback = false; 8867 r = 0; 8868 } else if (r == EMULATION_RESTART) 8869 goto restart; 8870 else 8871 r = 1; 8872 8873 writeback: 8874 if (writeback) { 8875 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 8876 toggle_interruptibility(vcpu, ctxt->interruptibility); 8877 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 8878 8879 /* 8880 * Note, EXCPT_DB is assumed to be fault-like as the emulator 8881 * only supports code breakpoints and general detect #DB, both 8882 * of which are fault-like. 8883 */ 8884 if (!ctxt->have_exception || 8885 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { 8886 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 8887 if (ctxt->is_branch) 8888 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 8889 kvm_rip_write(vcpu, ctxt->eip); 8890 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) 8891 r = kvm_vcpu_do_singlestep(vcpu); 8892 static_call_cond(kvm_x86_update_emulated_instruction)(vcpu); 8893 __kvm_set_rflags(vcpu, ctxt->eflags); 8894 } 8895 8896 /* 8897 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will 8898 * do nothing, and it will be requested again as soon as 8899 * the shadow expires. But we still need to check here, 8900 * because POPF has no interrupt shadow.
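E.g. a POPF that flips IF from 0 to 1 must open the interrupt window * immediately; there is no shadow whose expiry would re-raise the request.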
8901 */ 8902 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) 8903 kvm_make_request(KVM_REQ_EVENT, vcpu); 8904 } else 8905 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; 8906 8907 return r; 8908 } 8909 8910 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) 8911 { 8912 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); 8913 } 8914 EXPORT_SYMBOL_GPL(kvm_emulate_instruction); 8915 8916 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, 8917 void *insn, int insn_len) 8918 { 8919 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); 8920 } 8921 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); 8922 8923 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) 8924 { 8925 vcpu->arch.pio.count = 0; 8926 return 1; 8927 } 8928 8929 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) 8930 { 8931 vcpu->arch.pio.count = 0; 8932 8933 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) 8934 return 1; 8935 8936 return kvm_skip_emulated_instruction(vcpu); 8937 } 8938 8939 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, 8940 unsigned short port) 8941 { 8942 unsigned long val = kvm_rax_read(vcpu); 8943 int ret = emulator_pio_out(vcpu, size, port, &val, 1); 8944 8945 if (ret) 8946 return ret; 8947 8948 /* 8949 * Workaround userspace that relies on old KVM behavior of %rip being 8950 * incremented prior to exiting to userspace to handle "OUT 0x7e". 8951 */ 8952 if (port == 0x7e && 8953 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { 8954 vcpu->arch.complete_userspace_io = 8955 complete_fast_pio_out_port_0x7e; 8956 kvm_skip_emulated_instruction(vcpu); 8957 } else { 8958 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 8959 vcpu->arch.complete_userspace_io = complete_fast_pio_out; 8960 } 8961 return 0; 8962 } 8963 8964 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) 8965 { 8966 unsigned long val; 8967 8968 /* We should only ever be called with arch.pio.count equal to 1 */ 8969 BUG_ON(vcpu->arch.pio.count != 1); 8970 8971 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { 8972 vcpu->arch.pio.count = 0; 8973 return 1; 8974 } 8975 8976 /* For size less than 4 we merge, else we zero extend */ 8977 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; 8978 8979 complete_emulator_pio_in(vcpu, &val); 8980 kvm_rax_write(vcpu, val); 8981 8982 return kvm_skip_emulated_instruction(vcpu); 8983 } 8984 8985 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, 8986 unsigned short port) 8987 { 8988 unsigned long val; 8989 int ret; 8990 8991 /* For size less than 4 we merge, else we zero extend */ 8992 val = (size < 4) ? 
kvm_rax_read(vcpu) : 0; 8993 8994 ret = emulator_pio_in(vcpu, size, port, &val, 1); 8995 if (ret) { 8996 kvm_rax_write(vcpu, val); 8997 return ret; 8998 } 8999 9000 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 9001 vcpu->arch.complete_userspace_io = complete_fast_pio_in; 9002 9003 return 0; 9004 } 9005 9006 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) 9007 { 9008 int ret; 9009 9010 if (in) 9011 ret = kvm_fast_pio_in(vcpu, size, port); 9012 else 9013 ret = kvm_fast_pio_out(vcpu, size, port); 9014 return ret && kvm_skip_emulated_instruction(vcpu); 9015 } 9016 EXPORT_SYMBOL_GPL(kvm_fast_pio); 9017 9018 static int kvmclock_cpu_down_prep(unsigned int cpu) 9019 { 9020 __this_cpu_write(cpu_tsc_khz, 0); 9021 return 0; 9022 } 9023 9024 static void tsc_khz_changed(void *data) 9025 { 9026 struct cpufreq_freqs *freq = data; 9027 unsigned long khz = 0; 9028 9029 if (data) 9030 khz = freq->new; 9031 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 9032 khz = cpufreq_quick_get(raw_smp_processor_id()); 9033 if (!khz) 9034 khz = tsc_khz; 9035 __this_cpu_write(cpu_tsc_khz, khz); 9036 } 9037 9038 #ifdef CONFIG_X86_64 9039 static void kvm_hyperv_tsc_notifier(void) 9040 { 9041 struct kvm *kvm; 9042 int cpu; 9043 9044 mutex_lock(&kvm_lock); 9045 list_for_each_entry(kvm, &vm_list, vm_list) 9046 kvm_make_mclock_inprogress_request(kvm); 9047 9048 /* no guest entries from this point */ 9049 hyperv_stop_tsc_emulation(); 9050 9051 /* TSC frequency always matches when on Hyper-V */ 9052 for_each_present_cpu(cpu) 9053 per_cpu(cpu_tsc_khz, cpu) = tsc_khz; 9054 kvm_caps.max_guest_tsc_khz = tsc_khz; 9055 9056 list_for_each_entry(kvm, &vm_list, vm_list) { 9057 __kvm_start_pvclock_update(kvm); 9058 pvclock_update_vm_gtod_copy(kvm); 9059 kvm_end_pvclock_update(kvm); 9060 } 9061 9062 mutex_unlock(&kvm_lock); 9063 } 9064 #endif 9065 9066 static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu) 9067 { 9068 struct kvm *kvm; 9069 struct kvm_vcpu *vcpu; 9070 int send_ipi = 0; 9071 unsigned long i; 9072 9073 /* 9074 * We allow guests to temporarily run on slowing clocks, 9075 * provided we notify them after, or to run on accelerating 9076 * clocks, provided we notify them before. Thus time never 9077 * goes backwards. 9078 * 9079 * However, we have a problem. We can't atomically update 9080 * the frequency of a given CPU from this function; it is 9081 * merely a notifier, which can be called from any CPU. 9082 * Changing the TSC frequency at arbitrary points in time 9083 * requires a recomputation of local variables related to 9084 * the TSC for each VCPU. We must flag these local variables 9085 * to be updated and be sure the update takes place with the 9086 * new frequency before any guests proceed. 9087 * 9088 * Unfortunately, the combination of hotplug CPU and frequency 9089 * change creates an intractable locking scenario; the order 9090 * of when these callouts happen is undefined with respect to 9091 * CPU hotplug, and they can race with each other. As such, 9092 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is 9093 * undefined; you can actually have a CPU frequency change take 9094 * place in between the computation of X and the setting of the 9095 * variable. To protect against this problem, all updates of 9096 * the per_cpu tsc_khz variable are done in an interrupt 9097 * protected IPI, and all callers wishing to update the value 9098 * must wait for a synchronous IPI to complete (which is trivial 9099 * if the caller is on the CPU already). 
This establishes the 9100 * necessary total order on variable updates. 9101 * 9102 * Note that because a guest time update may take place 9103 * anytime after the setting of the VCPU's request bit, the 9104 * correct TSC value must be set before the request. However, 9105 * to ensure the update actually makes it to any guest which 9106 * starts running in hardware virtualization between the set 9107 * and the acquisition of the spinlock, we must also ping the 9108 * CPU after setting the request bit. 9109 * 9110 */ 9111 9112 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 9113 9114 mutex_lock(&kvm_lock); 9115 list_for_each_entry(kvm, &vm_list, vm_list) { 9116 kvm_for_each_vcpu(i, vcpu, kvm) { 9117 if (vcpu->cpu != cpu) 9118 continue; 9119 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 9120 if (vcpu->cpu != raw_smp_processor_id()) 9121 send_ipi = 1; 9122 } 9123 } 9124 mutex_unlock(&kvm_lock); 9125 9126 if (freq->old < freq->new && send_ipi) { 9127 /* 9128 * We upscale the frequency. Make sure the guest 9129 * doesn't see old kvmclock values while running with 9130 * the new frequency; otherwise we risk the guest seeing 9131 * time go backwards. 9132 * 9133 * In case we update the frequency for another cpu 9134 * (which might be in guest context), send an interrupt 9135 * to kick the cpu out of guest context. Next time 9136 * guest context is entered kvmclock will be updated, 9137 * so the guest will not see stale values. 9138 */ 9139 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 9140 } 9141 } 9142 9143 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 9144 void *data) 9145 { 9146 struct cpufreq_freqs *freq = data; 9147 int cpu; 9148 9149 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) 9150 return 0; 9151 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) 9152 return 0; 9153 9154 for_each_cpu(cpu, freq->policy->cpus) 9155 __kvmclock_cpufreq_notifier(freq, cpu); 9156 9157 return 0; 9158 } 9159 9160 static struct notifier_block kvmclock_cpufreq_notifier_block = { 9161 .notifier_call = kvmclock_cpufreq_notifier 9162 }; 9163 9164 static int kvmclock_cpu_online(unsigned int cpu) 9165 { 9166 tsc_khz_changed(NULL); 9167 return 0; 9168 } 9169 9170 static void kvm_timer_init(void) 9171 { 9172 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9173 max_tsc_khz = tsc_khz; 9174 9175 if (IS_ENABLED(CONFIG_CPU_FREQ)) { 9176 struct cpufreq_policy *policy; 9177 int cpu; 9178 9179 cpu = get_cpu(); 9180 policy = cpufreq_cpu_get(cpu); 9181 if (policy) { 9182 if (policy->cpuinfo.max_freq) 9183 max_tsc_khz = policy->cpuinfo.max_freq; 9184 cpufreq_cpu_put(policy); 9185 } 9186 put_cpu(); 9187 } 9188 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 9189 CPUFREQ_TRANSITION_NOTIFIER); 9190 } 9191 9192 cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online", 9193 kvmclock_cpu_online, kvmclock_cpu_down_prep); 9194 } 9195 9196 #ifdef CONFIG_X86_64 9197 static void pvclock_gtod_update_fn(struct work_struct *work) 9198 { 9199 struct kvm *kvm; 9200 struct kvm_vcpu *vcpu; 9201 unsigned long i; 9202 9203 mutex_lock(&kvm_lock); 9204 list_for_each_entry(kvm, &vm_list, vm_list) 9205 kvm_for_each_vcpu(i, vcpu, kvm) 9206 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 9207 atomic_set(&kvm_guest_has_master_clock, 0); 9208 mutex_unlock(&kvm_lock); 9209 } 9210 9211 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); 9212 9213 /* 9214 * Indirection to move queue_work() out of the tk_core.seq write held 9215 * region to prevent possible
deadlocks against time accessors which 9216 * are invoked with work related locks held. 9217 */ 9218 static void pvclock_irq_work_fn(struct irq_work *w) 9219 { 9220 queue_work(system_long_wq, &pvclock_gtod_work); 9221 } 9222 9223 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn); 9224 9225 /* 9226 * Notification about pvclock gtod data update. 9227 */ 9228 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, 9229 void *priv) 9230 { 9231 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 9232 struct timekeeper *tk = priv; 9233 9234 update_pvclock_gtod(tk); 9235 9236 /* 9237 * Disable master clock if host does not trust, or does not use, 9238 * TSC based clocksource. Delegate queue_work() to irq_work as 9239 * this is invoked with tk_core.seq write held. 9240 */ 9241 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && 9242 atomic_read(&kvm_guest_has_master_clock) != 0) 9243 irq_work_queue(&pvclock_irq_work); 9244 return 0; 9245 } 9246 9247 static struct notifier_block pvclock_gtod_notifier = { 9248 .notifier_call = pvclock_gtod_notify, 9249 }; 9250 #endif 9251 9252 int kvm_arch_init(void *opaque) 9253 { 9254 struct kvm_x86_init_ops *ops = opaque; 9255 u64 host_pat; 9256 int r; 9257 9258 if (kvm_x86_ops.hardware_enable) { 9259 pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name); 9260 return -EEXIST; 9261 } 9262 9263 if (!ops->cpu_has_kvm_support()) { 9264 pr_err_ratelimited("kvm: no hardware support for '%s'\n", 9265 ops->runtime_ops->name); 9266 return -EOPNOTSUPP; 9267 } 9268 if (ops->disabled_by_bios()) { 9269 pr_err_ratelimited("kvm: support for '%s' disabled by bios\n", 9270 ops->runtime_ops->name); 9271 return -EOPNOTSUPP; 9272 } 9273 9274 /* 9275 * KVM explicitly assumes that the guest has an FPU and 9276 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the 9277 * vCPU's FPU state as a fxregs_state struct. 9278 */ 9279 if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) { 9280 printk(KERN_ERR "kvm: inadequate fpu\n"); 9281 return -EOPNOTSUPP; 9282 } 9283 9284 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9285 pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n"); 9286 return -EOPNOTSUPP; 9287 } 9288 9289 /* 9290 * KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes 9291 * the PAT bits in SPTEs. Bail if PAT[0] is programmed to something 9292 * other than WB. Note, EPT doesn't utilize the PAT, but don't bother 9293 * with an exception. PAT[0] is set to WB on RESET and also by the 9294 * kernel, i.e. failure indicates a kernel bug or broken firmware. 
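(PAT entry 0 occupies bits 2:0 of MSR_IA32_CR_PAT and WB is encoding 6, * hence the GENMASK(2, 0) check against 6 below.)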
9295 */ 9296 if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) || 9297 (host_pat & GENMASK(2, 0)) != 6) { 9298 pr_err("kvm: host PAT[0] is not WB\n"); 9299 return -EIO; 9300 } 9301 9302 x86_emulator_cache = kvm_alloc_emulator_cache(); 9303 if (!x86_emulator_cache) { 9304 pr_err("kvm: failed to allocate cache for x86 emulator\n"); 9305 return -ENOMEM; 9306 } 9307 9308 user_return_msrs = alloc_percpu(struct kvm_user_return_msrs); 9309 if (!user_return_msrs) { 9310 printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n"); 9311 r = -ENOMEM; 9312 goto out_free_x86_emulator_cache; 9313 } 9314 kvm_nr_uret_msrs = 0; 9315 9316 r = kvm_mmu_vendor_module_init(); 9317 if (r) 9318 goto out_free_percpu; 9319 9320 kvm_timer_init(); 9321 9322 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 9323 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 9324 kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0; 9325 } 9326 9327 if (pi_inject_timer == -1) 9328 pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER); 9329 #ifdef CONFIG_X86_64 9330 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); 9331 9332 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9333 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier); 9334 #endif 9335 9336 return 0; 9337 9338 out_free_percpu: 9339 free_percpu(user_return_msrs); 9340 out_free_x86_emulator_cache: 9341 kmem_cache_destroy(x86_emulator_cache); 9342 return r; 9343 } 9344 9345 void kvm_arch_exit(void) 9346 { 9347 #ifdef CONFIG_X86_64 9348 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9349 clear_hv_tscchange_cb(); 9350 #endif 9351 kvm_lapic_exit(); 9352 9353 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 9354 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, 9355 CPUFREQ_TRANSITION_NOTIFIER); 9356 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); 9357 #ifdef CONFIG_X86_64 9358 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); 9359 irq_work_sync(&pvclock_irq_work); 9360 cancel_work_sync(&pvclock_gtod_work); 9361 #endif 9362 kvm_x86_ops.hardware_enable = NULL; 9363 kvm_mmu_vendor_module_exit(); 9364 free_percpu(user_return_msrs); 9365 kmem_cache_destroy(x86_emulator_cache); 9366 #ifdef CONFIG_KVM_XEN 9367 static_key_deferred_flush(&kvm_xen_enabled); 9368 WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); 9369 #endif 9370 } 9371 9372 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) 9373 { 9374 /* 9375 * The vCPU has halted, e.g. executed HLT. Update the run state if the 9376 * local APIC is in-kernel, the run loop will detect the non-runnable 9377 * state and halt the vCPU. Exit to userspace if the local APIC is 9378 * managed by userspace, in which case userspace is responsible for 9379 * handling wake events. 9380 */ 9381 ++vcpu->stat.halt_exits; 9382 if (lapic_in_kernel(vcpu)) { 9383 vcpu->arch.mp_state = state; 9384 return 1; 9385 } else { 9386 vcpu->run->exit_reason = reason; 9387 return 0; 9388 } 9389 } 9390 9391 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu) 9392 { 9393 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); 9394 } 9395 EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip); 9396 9397 int kvm_emulate_halt(struct kvm_vcpu *vcpu) 9398 { 9399 int ret = kvm_skip_emulated_instruction(vcpu); 9400 /* 9401 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered 9402 * KVM_EXIT_DEBUG here. 
9403 */ 9404 return kvm_emulate_halt_noskip(vcpu) && ret; 9405 } 9406 EXPORT_SYMBOL_GPL(kvm_emulate_halt); 9407 9408 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) 9409 { 9410 int ret = kvm_skip_emulated_instruction(vcpu); 9411 9412 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, 9413 KVM_EXIT_AP_RESET_HOLD) && ret; 9414 } 9415 EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold); 9416 9417 #ifdef CONFIG_X86_64 9418 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, 9419 unsigned long clock_type) 9420 { 9421 struct kvm_clock_pairing clock_pairing; 9422 struct timespec64 ts; 9423 u64 cycle; 9424 int ret; 9425 9426 if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK) 9427 return -KVM_EOPNOTSUPP; 9428 9429 /* 9430 * When the TSC is in permanent catchup mode, guests won't be able to use 9431 * the pvclock_read_retry loop to get a consistent view of the pvclock. 9432 */ 9433 if (vcpu->arch.tsc_always_catchup) 9434 return -KVM_EOPNOTSUPP; 9435 9436 if (!kvm_get_walltime_and_clockread(&ts, &cycle)) 9437 return -KVM_EOPNOTSUPP; 9438 9439 clock_pairing.sec = ts.tv_sec; 9440 clock_pairing.nsec = ts.tv_nsec; 9441 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); 9442 clock_pairing.flags = 0; 9443 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad)); 9444 9445 ret = 0; 9446 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, 9447 sizeof(struct kvm_clock_pairing))) 9448 ret = -KVM_EFAULT; 9449 9450 return ret; 9451 } 9452 #endif 9453 9454 /* 9455 * kvm_pv_kick_cpu_op: Kick a vcpu. 9456 * 9457 * @apicid - apicid of vcpu to be kicked. 9458 */ 9459 static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid) 9460 { 9461 /* 9462 * All other fields are unused for APIC_DM_REMRD, but may be consumed by 9463 * common code, e.g. for tracing. Defer initialization to the compiler.
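(The designated initializer below zero-initializes every field that is * not listed explicitly.)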
9464 */ 9465 struct kvm_lapic_irq lapic_irq = { 9466 .delivery_mode = APIC_DM_REMRD, 9467 .dest_mode = APIC_DEST_PHYSICAL, 9468 .shorthand = APIC_DEST_NOSHORT, 9469 .dest_id = apicid, 9470 }; 9471 9472 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); 9473 } 9474 9475 bool kvm_apicv_activated(struct kvm *kvm) 9476 { 9477 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); 9478 } 9479 EXPORT_SYMBOL_GPL(kvm_apicv_activated); 9480 9481 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu) 9482 { 9483 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons); 9484 ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu); 9485 9486 return (vm_reasons | vcpu_reasons) == 0; 9487 } 9488 EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated); 9489 9490 static void set_or_clear_apicv_inhibit(unsigned long *inhibits, 9491 enum kvm_apicv_inhibit reason, bool set) 9492 { 9493 if (set) 9494 __set_bit(reason, inhibits); 9495 else 9496 __clear_bit(reason, inhibits); 9497 9498 trace_kvm_apicv_inhibit_changed(reason, set, *inhibits); 9499 } 9500 9501 static void kvm_apicv_init(struct kvm *kvm) 9502 { 9503 unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons; 9504 9505 init_rwsem(&kvm->arch.apicv_update_lock); 9506 9507 set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true); 9508 9509 if (!enable_apicv) 9510 set_or_clear_apicv_inhibit(inhibits, 9511 APICV_INHIBIT_REASON_DISABLE, true); 9512 } 9513 9514 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) 9515 { 9516 struct kvm_vcpu *target = NULL; 9517 struct kvm_apic_map *map; 9518 9519 vcpu->stat.directed_yield_attempted++; 9520 9521 if (single_task_running()) 9522 goto no_yield; 9523 9524 rcu_read_lock(); 9525 map = rcu_dereference(vcpu->kvm->arch.apic_map); 9526 9527 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) 9528 target = map->phys_map[dest_id]->vcpu; 9529 9530 rcu_read_unlock(); 9531 9532 if (!target || !READ_ONCE(target->ready)) 9533 goto no_yield; 9534 9535 /* Ignore requests to yield to self */ 9536 if (vcpu == target) 9537 goto no_yield; 9538 9539 if (kvm_vcpu_yield_to(target) <= 0) 9540 goto no_yield; 9541 9542 vcpu->stat.directed_yield_successful++; 9543 9544 no_yield: 9545 return; 9546 } 9547 9548 static int complete_hypercall_exit(struct kvm_vcpu *vcpu) 9549 { 9550 u64 ret = vcpu->run->hypercall.ret; 9551 9552 if (!is_64_bit_mode(vcpu)) 9553 ret = (u32)ret; 9554 kvm_rax_write(vcpu, ret); 9555 ++vcpu->stat.hypercalls; 9556 return kvm_skip_emulated_instruction(vcpu); 9557 } 9558 9559 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 9560 { 9561 unsigned long nr, a0, a1, a2, a3, ret; 9562 int op_64_bit; 9563 9564 if (kvm_xen_hypercall_enabled(vcpu->kvm)) 9565 return kvm_xen_hypercall(vcpu); 9566 9567 if (kvm_hv_hypercall_enabled(vcpu)) 9568 return kvm_hv_hypercall(vcpu); 9569 9570 nr = kvm_rax_read(vcpu); 9571 a0 = kvm_rbx_read(vcpu); 9572 a1 = kvm_rcx_read(vcpu); 9573 a2 = kvm_rdx_read(vcpu); 9574 a3 = kvm_rsi_read(vcpu); 9575 9576 trace_kvm_hypercall(nr, a0, a1, a2, a3); 9577 9578 op_64_bit = is_64_bit_hypercall(vcpu); 9579 if (!op_64_bit) { 9580 nr &= 0xFFFFFFFF; 9581 a0 &= 0xFFFFFFFF; 9582 a1 &= 0xFFFFFFFF; 9583 a2 &= 0xFFFFFFFF; 9584 a3 &= 0xFFFFFFFF; 9585 } 9586 9587 if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { 9588 ret = -KVM_EPERM; 9589 goto out; 9590 } 9591 9592 ret = -KVM_ENOSYS; 9593 9594 switch (nr) { 9595 case KVM_HC_VAPIC_POLL_IRQ: 9596 ret = 0; 9597 break; 9598 case KVM_HC_KICK_CPU: 9599 if (!guest_pv_has(vcpu, 
KVM_FEATURE_PV_UNHALT)) 9600 break; 9601 9602 kvm_pv_kick_cpu_op(vcpu->kvm, a1); 9603 kvm_sched_yield(vcpu, a1); 9604 ret = 0; 9605 break; 9606 #ifdef CONFIG_X86_64 9607 case KVM_HC_CLOCK_PAIRING: 9608 ret = kvm_pv_clock_pairing(vcpu, a0, a1); 9609 break; 9610 #endif 9611 case KVM_HC_SEND_IPI: 9612 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI)) 9613 break; 9614 9615 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); 9616 break; 9617 case KVM_HC_SCHED_YIELD: 9618 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) 9619 break; 9620 9621 kvm_sched_yield(vcpu, a0); 9622 ret = 0; 9623 break; 9624 case KVM_HC_MAP_GPA_RANGE: { 9625 u64 gpa = a0, npages = a1, attrs = a2; 9626 9627 ret = -KVM_ENOSYS; 9628 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) 9629 break; 9630 9631 if (!PAGE_ALIGNED(gpa) || !npages || 9632 gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) { 9633 ret = -KVM_EINVAL; 9634 break; 9635 } 9636 9637 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; 9638 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; 9639 vcpu->run->hypercall.args[0] = gpa; 9640 vcpu->run->hypercall.args[1] = npages; 9641 vcpu->run->hypercall.args[2] = attrs; 9642 vcpu->run->hypercall.longmode = op_64_bit; 9643 vcpu->arch.complete_userspace_io = complete_hypercall_exit; 9644 return 0; 9645 } 9646 default: 9647 ret = -KVM_ENOSYS; 9648 break; 9649 } 9650 out: 9651 if (!op_64_bit) 9652 ret = (u32)ret; 9653 kvm_rax_write(vcpu, ret); 9654 9655 ++vcpu->stat.hypercalls; 9656 return kvm_skip_emulated_instruction(vcpu); 9657 } 9658 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 9659 9660 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) 9661 { 9662 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 9663 char instruction[3]; 9664 unsigned long rip = kvm_rip_read(vcpu); 9665 9666 /* 9667 * If the quirk is disabled, synthesize a #UD and let the guest pick up 9668 * the pieces. 9669 */ 9670 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { 9671 ctxt->exception.error_code_valid = false; 9672 ctxt->exception.vector = UD_VECTOR; 9673 ctxt->have_exception = true; 9674 return X86EMUL_PROPAGATE_FAULT; 9675 } 9676 9677 static_call(kvm_x86_patch_hypercall)(vcpu, instruction); 9678 9679 return emulator_write_emulated(ctxt, rip, instruction, 3, 9680 &ctxt->exception); 9681 } 9682 9683 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 9684 { 9685 return vcpu->run->request_interrupt_window && 9686 likely(!pic_in_kernel(vcpu->kvm)); 9687 } 9688 9689 /* Called within kvm->srcu read side. 
*/ 9690 static void post_kvm_run_save(struct kvm_vcpu *vcpu) 9691 { 9692 struct kvm_run *kvm_run = vcpu->run; 9693 9694 kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu); 9695 kvm_run->cr8 = kvm_get_cr8(vcpu); 9696 kvm_run->apic_base = kvm_get_apic_base(vcpu); 9697 9698 kvm_run->ready_for_interrupt_injection = 9699 pic_in_kernel(vcpu->kvm) || 9700 kvm_vcpu_ready_for_interrupt_injection(vcpu); 9701 9702 if (is_smm(vcpu)) 9703 kvm_run->flags |= KVM_RUN_X86_SMM; 9704 } 9705 9706 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 9707 { 9708 int max_irr, tpr; 9709 9710 if (!kvm_x86_ops.update_cr8_intercept) 9711 return; 9712 9713 if (!lapic_in_kernel(vcpu)) 9714 return; 9715 9716 if (vcpu->arch.apic->apicv_active) 9717 return; 9718 9719 if (!vcpu->arch.apic->vapic_addr) 9720 max_irr = kvm_lapic_find_highest_irr(vcpu); 9721 else 9722 max_irr = -1; 9723 9724 if (max_irr != -1) 9725 max_irr >>= 4; 9726 9727 tpr = kvm_lapic_get_cr8(vcpu); 9728 9729 static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); 9730 } 9731 9732 9733 int kvm_check_nested_events(struct kvm_vcpu *vcpu) 9734 { 9735 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 9736 kvm_x86_ops.nested_ops->triple_fault(vcpu); 9737 return 1; 9738 } 9739 9740 return kvm_x86_ops.nested_ops->check_events(vcpu); 9741 } 9742 9743 static void kvm_inject_exception(struct kvm_vcpu *vcpu) 9744 { 9745 trace_kvm_inj_exception(vcpu->arch.exception.vector, 9746 vcpu->arch.exception.has_error_code, 9747 vcpu->arch.exception.error_code, 9748 vcpu->arch.exception.injected); 9749 9750 if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) 9751 vcpu->arch.exception.error_code = false; 9752 static_call(kvm_x86_inject_exception)(vcpu); 9753 } 9754 9755 /* 9756 * Check for any event (interrupt or exception) that is ready to be injected, 9757 * and if there is at least one event, inject the event with the highest 9758 * priority. This handles both "pending" events, i.e. events that have never 9759 * been injected into the guest, and "injected" events, i.e. events that were 9760 * injected as part of a previous VM-Enter, but weren't successfully delivered 9761 * and need to be re-injected. 9762 * 9763 * Note, this is not guaranteed to be invoked on a guest instruction boundary, 9764 * i.e. doesn't guarantee that there's an event window in the guest. KVM must 9765 * be able to inject exceptions in the "middle" of an instruction, and so must 9766 * also be able to re-inject NMIs and IRQs in the middle of an instruction. 9767 * I.e. for exceptions and re-injected events, NOT invoking this on instruction 9768 * boundaries is necessary and correct. 9769 * 9770 * For simplicity, KVM uses a single path to inject all events (except events 9771 * that are injected directly from L1 to L2) and doesn't explicitly track 9772 * instruction boundaries for asynchronous events. However, because VM-Exits 9773 * that can occur during instruction execution typically result in KVM skipping 9774 * the instruction or injecting an exception, e.g. instruction and exception 9775 * intercepts, and because pending exceptions have higher priority than pending 9776 * interrupts, KVM still honors instruction boundaries in most scenarios. 9777 * 9778 * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip 9779 * the instruction or inject an exception, then KVM can incorrectly inject a new 9780 * asynchronous event if the event became pending after the CPU fetched the 9781 * instruction (in the guest). E.g.
if a page fault (#PF, #NPF, EPT violation) 9782 * occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be 9783 * injected on the restarted instruction instead of being deferred until the 9784 * instruction completes. 9785 * 9786 * In practice, this virtualization hole is unlikely to be observed by the 9787 * guest, and even less likely to cause functional problems. To detect the 9788 * hole, the guest would have to trigger an event on a side effect of an early 9789 * phase of instruction execution, e.g. on the instruction fetch from memory. 9790 * And for it to be a functional problem, the guest would need to depend on the 9791 * ordering between that side effect, the instruction completing, _and_ the 9792 * delivery of the asynchronous event. 9793 */ 9794 static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu, 9795 bool *req_immediate_exit) 9796 { 9797 bool can_inject; 9798 int r; 9799 9800 /* 9801 * Process nested events first, as nested VM-Exit supersedes event 9802 * re-injection. If there's an event queued for re-injection, it will 9803 * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit. 9804 */ 9805 if (is_guest_mode(vcpu)) 9806 r = kvm_check_nested_events(vcpu); 9807 else 9808 r = 0; 9809 9810 /* 9811 * Re-inject exceptions and events *especially* if immediate entry+exit 9812 * to/from L2 is needed, as any event that has already been injected 9813 * into L2 needs to complete its lifecycle before injecting a new event. 9814 * 9815 * Don't re-inject an NMI or interrupt if there is a pending exception. 9816 * This collision arises if an exception occurred while vectoring the 9817 * injected event, KVM intercepted said exception, and KVM ultimately 9818 * determined the fault belongs to the guest and queued the exception 9819 * for injection back into the guest. 9820 * 9821 * "Injected" interrupts can also collide with pending exceptions if 9822 * userspace ignores the "ready for injection" flag and blindly queues 9823 * an interrupt. In that case, prioritizing the exception is correct, 9824 * as the exception "occurred" before the exit to userspace. Trap-like 9825 * exceptions, e.g. most #DBs, have higher priority than interrupts. 9826 * And while fault-like exceptions, e.g. #GP and #PF, are the lowest 9827 * priority, they're only generated (pended) during instruction 9828 * execution, and interrupts are recognized at instruction boundaries. 9829 * Thus a pending fault-like exception means the fault occurred on the 9830 * *previous* instruction and must be serviced prior to recognizing any 9831 * new events in order to fully complete the previous instruction. 9832 */ 9833 if (vcpu->arch.exception.injected) 9834 kvm_inject_exception(vcpu); 9835 else if (kvm_is_exception_pending(vcpu)) 9836 ; /* see above */ 9837 else if (vcpu->arch.nmi_injected) 9838 static_call(kvm_x86_inject_nmi)(vcpu); 9839 else if (vcpu->arch.interrupt.injected) 9840 static_call(kvm_x86_inject_irq)(vcpu, true); 9841 9842 /* 9843 * Exceptions that morph to VM-Exits are handled above, and pending 9844 * exceptions on top of injected exceptions that do not VM-Exit should 9845 * either morph to #DF or, sadly, override the injected exception. 9846 */ 9847 WARN_ON_ONCE(vcpu->arch.exception.injected && 9848 vcpu->arch.exception.pending); 9849 9850 /* 9851 * Bail if immediate entry+exit to/from the guest is needed to complete 9852 * nested VM-Enter or event re-injection so that a different pending 9853 * event can be serviced (or if KVM needs to exit to userspace).
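 * (A negative r here comes from kvm_check_nested_events() above, e.g. -EBUSY
 * when a nested VM-Exit must complete before a new event can be injected.)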
9854 * 9855 * Otherwise, continue processing events even if VM-Exit occurred. The 9856 * VM-Exit will have cleared exceptions that were meant for L2, but 9857 * there may now be events that can be injected into L1. 9858 */ 9859 if (r < 0) 9860 goto out; 9861 9862 /* 9863 * A pending exception VM-Exit should either result in nested VM-Exit 9864 * or force an immediate re-entry and exit to/from L2, and exception 9865 * VM-Exits cannot be injected (flag should _never_ be set). 9866 */ 9867 WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected || 9868 vcpu->arch.exception_vmexit.pending); 9869 9870 /* 9871 * New events, other than exceptions, cannot be injected if KVM needs 9872 * to re-inject a previous event. See above comments on re-injecting 9873 * for why pending exceptions get priority. 9874 */ 9875 can_inject = !kvm_event_needs_reinjection(vcpu); 9876 9877 if (vcpu->arch.exception.pending) { 9878 /* 9879 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS 9880 * value pushed on the stack. Trap-like exceptions and all #DBs 9881 * leave RF as-is (KVM follows Intel's behavior in this regard; 9882 * AMD states that code breakpoint #DBs explicitly clear RF=0). 9883 * 9884 * Note, most versions of Intel's SDM and AMD's APM incorrectly 9885 * describe the behavior of General Detect #DBs, which are 9886 * fault-like. They do _not_ set RF, a la code breakpoints. 9887 */ 9888 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT) 9889 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | 9890 X86_EFLAGS_RF); 9891 9892 if (vcpu->arch.exception.vector == DB_VECTOR) { 9893 kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception); 9894 if (vcpu->arch.dr7 & DR7_GD) { 9895 vcpu->arch.dr7 &= ~DR7_GD; 9896 kvm_update_dr7(vcpu); 9897 } 9898 } 9899 9900 kvm_inject_exception(vcpu); 9901 9902 vcpu->arch.exception.pending = false; 9903 vcpu->arch.exception.injected = true; 9904 9905 can_inject = false; 9906 } 9907 9908 /* Don't inject interrupts if the user asked to avoid doing so */ 9909 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) 9910 return 0; 9911 9912 /* 9913 * Finally, inject interrupt events. If an event cannot be injected 9914 * due to architectural conditions (e.g. IF=0) a window-open exit 9915 * will re-request KVM_REQ_EVENT. Sometimes, however, an event is pending 9916 * and can architecturally be injected, but we cannot do it right now: 9917 * an interrupt could have arrived just now and we have to inject it 9918 * as a vmexit, or there could already be an event in the queue, which is 9919 * indicated by can_inject. In that case we request an immediate exit 9920 * in order to make progress and get back here for another iteration. 9921 * The kvm_x86_ops hooks communicate this by returning -EBUSY. 9922 */ 9923 #ifdef CONFIG_KVM_SMM 9924 if (vcpu->arch.smi_pending) { 9925 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; 9926 if (r < 0) 9927 goto out; 9928 if (r) { 9929 vcpu->arch.smi_pending = false; 9930 ++vcpu->arch.smi_count; 9931 enter_smm(vcpu); 9932 can_inject = false; 9933 } else 9934 static_call(kvm_x86_enable_smi_window)(vcpu); 9935 } 9936 #endif 9937 9938 if (vcpu->arch.nmi_pending) { 9939 r = can_inject ?
static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; 9940 if (r < 0) 9941 goto out; 9942 if (r) { 9943 --vcpu->arch.nmi_pending; 9944 vcpu->arch.nmi_injected = true; 9945 static_call(kvm_x86_inject_nmi)(vcpu); 9946 can_inject = false; 9947 WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); 9948 } 9949 if (vcpu->arch.nmi_pending) 9950 static_call(kvm_x86_enable_nmi_window)(vcpu); 9951 } 9952 9953 if (kvm_cpu_has_injectable_intr(vcpu)) { 9954 r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; 9955 if (r < 0) 9956 goto out; 9957 if (r) { 9958 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); 9959 static_call(kvm_x86_inject_irq)(vcpu, false); 9960 WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); 9961 } 9962 if (kvm_cpu_has_injectable_intr(vcpu)) 9963 static_call(kvm_x86_enable_irq_window)(vcpu); 9964 } 9965 9966 if (is_guest_mode(vcpu) && 9967 kvm_x86_ops.nested_ops->has_events && 9968 kvm_x86_ops.nested_ops->has_events(vcpu)) 9969 *req_immediate_exit = true; 9970 9971 /* 9972 * KVM must never queue a new exception while injecting an event; KVM 9973 * is done emulating and should only propagate the to-be-injected event 9974 * to the VMCS/VMCB. Queueing a new exception can put the vCPU into an 9975 * infinite loop as KVM will bail from VM-Enter to inject the pending 9976 * exception and start the cycle all over. 9977 * 9978 * Exempt triple faults as they have special handling and won't put the 9979 * vCPU into an infinite loop. Triple fault can be queued when running 9980 * VMX without unrestricted guest, as that requires KVM to emulate Real 9981 * Mode events (see kvm_inject_realmode_interrupt()). 9982 */ 9983 WARN_ON_ONCE(vcpu->arch.exception.pending || 9984 vcpu->arch.exception_vmexit.pending); 9985 return 0; 9986 9987 out: 9988 if (r == -EBUSY) { 9989 *req_immediate_exit = true; 9990 r = 0; 9991 } 9992 return r; 9993 } 9994 9995 static void process_nmi(struct kvm_vcpu *vcpu) 9996 { 9997 unsigned limit = 2; 9998 9999 /* 10000 * x86 is limited to one NMI running, and one NMI pending after it. 10001 * If an NMI is already in progress, limit further NMIs to just one. 10002 * Otherwise, allow two (and we'll inject the first one immediately). 
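 *
 * For example: if an NMI is masked or awaiting re-injection, three NMIs
 * racing into nmi_queued collapse to nmi_pending = 1 below; otherwise they
 * collapse to nmi_pending = 2.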
10003 */ 10004 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) 10005 limit = 1; 10006 10007 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); 10008 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); 10009 kvm_make_request(KVM_REQ_EVENT, vcpu); 10010 } 10011 10012 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, 10013 unsigned long *vcpu_bitmap) 10014 { 10015 kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap); 10016 } 10017 10018 void kvm_make_scan_ioapic_request(struct kvm *kvm) 10019 { 10020 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); 10021 } 10022 10023 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) 10024 { 10025 struct kvm_lapic *apic = vcpu->arch.apic; 10026 bool activate; 10027 10028 if (!lapic_in_kernel(vcpu)) 10029 return; 10030 10031 down_read(&vcpu->kvm->arch.apicv_update_lock); 10032 preempt_disable(); 10033 10034 /* Do not activate APICV when APIC is disabled */ 10035 activate = kvm_vcpu_apicv_activated(vcpu) && 10036 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED); 10037 10038 if (apic->apicv_active == activate) 10039 goto out; 10040 10041 apic->apicv_active = activate; 10042 kvm_apic_update_apicv(vcpu); 10043 static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); 10044 10045 /* 10046 * When APICv gets disabled, we may still have injected interrupts 10047 * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was 10048 * still active when the interrupt got accepted. Make sure 10049 * kvm_check_and_inject_events() is called to check for that. 10050 */ 10051 if (!apic->apicv_active) 10052 kvm_make_request(KVM_REQ_EVENT, vcpu); 10053 10054 out: 10055 preempt_enable(); 10056 up_read(&vcpu->kvm->arch.apicv_update_lock); 10057 } 10058 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); 10059 10060 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, 10061 enum kvm_apicv_inhibit reason, bool set) 10062 { 10063 unsigned long old, new; 10064 10065 lockdep_assert_held_write(&kvm->arch.apicv_update_lock); 10066 10067 if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(reason)) 10068 return; 10069 10070 old = new = kvm->arch.apicv_inhibit_reasons; 10071 10072 set_or_clear_apicv_inhibit(&new, reason, set); 10073 10074 if (!!old != !!new) { 10075 /* 10076 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid 10077 * false positives in the sanity check WARN in svm_vcpu_run(). 10078 * This task will wait for all vCPUs to ack the kick IRQ before 10079 * updating apicv_inhibit_reasons, and all other vCPUs will 10080 * block on acquiring apicv_update_lock so that vCPUs can't 10081 * redo svm_vcpu_run() without seeing the new inhibit state. 10082 * 10083 * Note, holding apicv_update_lock and taking it in the read 10084 * side (handling the request) also prevents other vCPUs from 10085 * servicing the request with a stale apicv_inhibit_reasons. 
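 *
 * In short: make the request first, then publish the new value, so that no
 * vCPU can re-enter the guest while still observing the stale inhibit state.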
10086 */ 10087 kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE); 10088 kvm->arch.apicv_inhibit_reasons = new; 10089 if (new) { 10090 unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE); 10091 int idx = srcu_read_lock(&kvm->srcu); 10092 10093 kvm_zap_gfn_range(kvm, gfn, gfn+1); 10094 srcu_read_unlock(&kvm->srcu, idx); 10095 } 10096 } else { 10097 kvm->arch.apicv_inhibit_reasons = new; 10098 } 10099 } 10100 10101 void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, 10102 enum kvm_apicv_inhibit reason, bool set) 10103 { 10104 if (!enable_apicv) 10105 return; 10106 10107 down_write(&kvm->arch.apicv_update_lock); 10108 __kvm_set_or_clear_apicv_inhibit(kvm, reason, set); 10109 up_write(&kvm->arch.apicv_update_lock); 10110 } 10111 EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit); 10112 10113 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) 10114 { 10115 if (!kvm_apic_present(vcpu)) 10116 return; 10117 10118 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); 10119 10120 if (irqchip_split(vcpu->kvm)) 10121 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); 10122 else { 10123 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10124 if (ioapic_in_kernel(vcpu->kvm)) 10125 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); 10126 } 10127 10128 if (is_guest_mode(vcpu)) 10129 vcpu->arch.load_eoi_exitmap_pending = true; 10130 else 10131 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); 10132 } 10133 10134 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) 10135 { 10136 u64 eoi_exit_bitmap[4]; 10137 10138 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 10139 return; 10140 10141 if (to_hv_vcpu(vcpu)) { 10142 bitmap_or((ulong *)eoi_exit_bitmap, 10143 vcpu->arch.ioapic_handled_vectors, 10144 to_hv_synic(vcpu)->vec_bitmap, 256); 10145 static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); 10146 return; 10147 } 10148 10149 static_call_cond(kvm_x86_load_eoi_exitmap)( 10150 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); 10151 } 10152 10153 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, 10154 unsigned long start, unsigned long end) 10155 { 10156 unsigned long apic_address; 10157 10158 /* 10159 * The physical address of the APIC access page is stored in the VMCS. 10160 * Update it when it becomes invalid. 10161 */ 10162 apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 10163 if (start <= apic_address && apic_address < end) 10164 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); 10165 } 10166 10167 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) 10168 { 10169 static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm); 10170 } 10171 10172 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) 10173 { 10174 if (!lapic_in_kernel(vcpu)) 10175 return; 10176 10177 static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu); 10178 } 10179 10180 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) 10181 { 10182 smp_send_reschedule(vcpu->cpu); 10183 } 10184 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); 10185 10186 /* 10187 * Called within kvm->srcu read side. 10188 * Returns 1 to let vcpu_run() continue the guest execution loop without 10189 * exiting to userspace. Otherwise, the value will be returned to 10190 * userspace.
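 * (Concretely: a positive return resumes the run loop, r == 0 exits to
 * userspace with vcpu->run filled in, and r < 0 is returned as an error
 * from the ioctl.)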
10191 */ 10192 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) 10193 { 10194 int r; 10195 bool req_int_win = 10196 dm_request_for_irq_injection(vcpu) && 10197 kvm_cpu_accept_dm_intr(vcpu); 10198 fastpath_t exit_fastpath; 10199 10200 bool req_immediate_exit = false; 10201 10202 /* Forbid vmenter if vcpu dirty ring is soft-full */ 10203 if (unlikely(vcpu->kvm->dirty_ring_size && 10204 kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) { 10205 vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL; 10206 trace_kvm_dirty_ring_exit(vcpu); 10207 r = 0; 10208 goto out; 10209 } 10210 10211 if (kvm_request_pending(vcpu)) { 10212 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) { 10213 r = -EIO; 10214 goto out; 10215 } 10216 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { 10217 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { 10218 r = 0; 10219 goto out; 10220 } 10221 } 10222 if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu)) 10223 kvm_mmu_free_obsolete_roots(vcpu); 10224 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) 10225 __kvm_migrate_timers(vcpu); 10226 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) 10227 kvm_update_masterclock(vcpu->kvm); 10228 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) 10229 kvm_gen_kvmclock_update(vcpu); 10230 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { 10231 r = kvm_guest_time_update(vcpu); 10232 if (unlikely(r)) 10233 goto out; 10234 } 10235 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 10236 kvm_mmu_sync_roots(vcpu); 10237 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) 10238 kvm_mmu_load_pgd(vcpu); 10239 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { 10240 kvm_vcpu_flush_tlb_all(vcpu); 10241 10242 /* Flushing all ASIDs flushes the current ASID... */ 10243 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 10244 } 10245 kvm_service_local_tlb_flush_requests(vcpu); 10246 10247 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { 10248 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; 10249 r = 0; 10250 goto out; 10251 } 10252 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 10253 if (is_guest_mode(vcpu)) 10254 kvm_x86_ops.nested_ops->triple_fault(vcpu); 10255 10256 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 10257 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 10258 vcpu->mmio_needed = 0; 10259 r = 0; 10260 } 10261 goto out; 10262 } 10263 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { 10264 /* Page is swapped out. 
Do synthetic halt */ 10265 vcpu->arch.apf.halted = true; 10266 r = 1; 10267 goto out; 10268 } 10269 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) 10270 record_steal_time(vcpu); 10271 #ifdef CONFIG_KVM_SMM 10272 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 10273 process_smi(vcpu); 10274 #endif 10275 if (kvm_check_request(KVM_REQ_NMI, vcpu)) 10276 process_nmi(vcpu); 10277 if (kvm_check_request(KVM_REQ_PMU, vcpu)) 10278 kvm_pmu_handle_event(vcpu); 10279 if (kvm_check_request(KVM_REQ_PMI, vcpu)) 10280 kvm_pmu_deliver_pmi(vcpu); 10281 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { 10282 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); 10283 if (test_bit(vcpu->arch.pending_ioapic_eoi, 10284 vcpu->arch.ioapic_handled_vectors)) { 10285 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; 10286 vcpu->run->eoi.vector = 10287 vcpu->arch.pending_ioapic_eoi; 10288 r = 0; 10289 goto out; 10290 } 10291 } 10292 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 10293 vcpu_scan_ioapic(vcpu); 10294 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) 10295 vcpu_load_eoi_exitmap(vcpu); 10296 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) 10297 kvm_vcpu_reload_apic_access_page(vcpu); 10298 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { 10299 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10300 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; 10301 vcpu->run->system_event.ndata = 0; 10302 r = 0; 10303 goto out; 10304 } 10305 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { 10306 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10307 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; 10308 vcpu->run->system_event.ndata = 0; 10309 r = 0; 10310 goto out; 10311 } 10312 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { 10313 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 10314 10315 vcpu->run->exit_reason = KVM_EXIT_HYPERV; 10316 vcpu->run->hyperv = hv_vcpu->exit; 10317 r = 0; 10318 goto out; 10319 } 10320 10321 /* 10322 * KVM_REQ_HV_STIMER has to be processed after 10323 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers 10324 * depend on the guest clock being up-to-date 10325 */ 10326 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) 10327 kvm_hv_process_stimers(vcpu); 10328 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) 10329 kvm_vcpu_update_apicv(vcpu); 10330 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) 10331 kvm_check_async_pf_completion(vcpu); 10332 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) 10333 static_call(kvm_x86_msr_filter_changed)(vcpu); 10334 10335 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) 10336 static_call(kvm_x86_update_cpu_dirty_logging)(vcpu); 10337 } 10338 10339 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || 10340 kvm_xen_has_interrupt(vcpu)) { 10341 ++vcpu->stat.req_event; 10342 r = kvm_apic_accept_events(vcpu); 10343 if (r < 0) { 10344 r = 0; 10345 goto out; 10346 } 10347 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 10348 r = 1; 10349 goto out; 10350 } 10351 10352 r = kvm_check_and_inject_events(vcpu, &req_immediate_exit); 10353 if (r < 0) { 10354 r = 0; 10355 goto out; 10356 } 10357 if (req_int_win) 10358 static_call(kvm_x86_enable_irq_window)(vcpu); 10359 10360 if (kvm_lapic_enabled(vcpu)) { 10361 update_cr8_intercept(vcpu); 10362 kvm_lapic_sync_to_vapic(vcpu); 10363 } 10364 } 10365 10366 r = kvm_mmu_reload(vcpu); 10367 if (unlikely(r)) { 10368 goto cancel_injection; 10369 } 10370 10371 preempt_disable(); 10372 10373 static_call(kvm_x86_prepare_switch_to_guest)(vcpu); 10374 10375 /* 10376 * Disable IRQs 
before setting IN_GUEST_MODE. Posted interrupt 10377 * IPIs are then delayed after guest entry, which ensures that they 10378 * result in virtual interrupt delivery. 10379 */ 10380 local_irq_disable(); 10381 10382 /* Store vcpu->apicv_active before vcpu->mode. */ 10383 smp_store_release(&vcpu->mode, IN_GUEST_MODE); 10384 10385 kvm_vcpu_srcu_read_unlock(vcpu); 10386 10387 /* 10388 * 1) We should set ->mode before checking ->requests. Please see 10389 * the comment in kvm_vcpu_exiting_guest_mode(). 10390 * 10391 * 2) For APICv, we should set ->mode before checking PID.ON. This 10392 * pairs with the memory barrier implicit in pi_test_and_set_on 10393 * (see vmx_deliver_posted_interrupt). 10394 * 10395 * 3) This also orders the write to mode from any reads to the page 10396 * tables done while the VCPU is running. Please see the comment 10397 * in kvm_flush_remote_tlbs. 10398 */ 10399 smp_mb__after_srcu_read_unlock(); 10400 10401 /* 10402 * Process pending posted interrupts to handle the case where the 10403 * notification IRQ arrived in the host, or was never sent (because the 10404 * target vCPU wasn't running). Do this regardless of the vCPU's APICv 10405 * status; KVM doesn't update assigned devices when APICv is inhibited, 10406 * i.e. they can post interrupts even if APICv is temporarily disabled. 10407 */ 10408 if (kvm_lapic_enabled(vcpu)) 10409 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10410 10411 if (kvm_vcpu_exit_request(vcpu)) { 10412 vcpu->mode = OUTSIDE_GUEST_MODE; 10413 smp_wmb(); 10414 local_irq_enable(); 10415 preempt_enable(); 10416 kvm_vcpu_srcu_read_lock(vcpu); 10417 r = 1; 10418 goto cancel_injection; 10419 } 10420 10421 if (req_immediate_exit) { 10422 kvm_make_request(KVM_REQ_EVENT, vcpu); 10423 static_call(kvm_x86_request_immediate_exit)(vcpu); 10424 } 10425 10426 fpregs_assert_state_consistent(); 10427 if (test_thread_flag(TIF_NEED_FPU_LOAD)) 10428 switch_fpu_return(); 10429 10430 if (vcpu->arch.guest_fpu.xfd_err) 10431 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); 10432 10433 if (unlikely(vcpu->arch.switch_db_regs)) { 10434 set_debugreg(0, 7); 10435 set_debugreg(vcpu->arch.eff_db[0], 0); 10436 set_debugreg(vcpu->arch.eff_db[1], 1); 10437 set_debugreg(vcpu->arch.eff_db[2], 2); 10438 set_debugreg(vcpu->arch.eff_db[3], 3); 10439 } else if (unlikely(hw_breakpoint_active())) { 10440 set_debugreg(0, 7); 10441 } 10442 10443 guest_timing_enter_irqoff(); 10444 10445 for (;;) { 10446 /* 10447 * Assert that vCPU vs. VM APICv state is consistent. An APICv 10448 * update must kick and wait for all vCPUs before toggling the 10449 * per-VM state, and responding vCPUs must wait for the update 10450 * to complete before servicing KVM_REQ_APICV_UPDATE. 10451 */ 10452 WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) && 10453 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED)); 10454 10455 exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu); 10456 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) 10457 break; 10458 10459 if (kvm_lapic_enabled(vcpu)) 10460 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10461 10462 if (unlikely(kvm_vcpu_exit_request(vcpu))) { 10463 exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; 10464 break; 10465 } 10466 } 10467 10468 /* 10469 * Do this here before restoring debug registers on the host. And 10470 * since we do this before handling the vmexit, a DR access vmexit 10471 * can (a) read the correct value of the debug registers, (b) set 10472 * KVM_DEBUGREG_WONT_EXIT again.
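 * (KVM_DEBUGREG_WONT_EXIT means the guest has direct access to the debug
 * registers, so their current values must be read back from hardware here,
 * before the host's breakpoint state is restored below.)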
10473 */ 10474 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { 10475 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); 10476 static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); 10477 kvm_update_dr0123(vcpu); 10478 kvm_update_dr7(vcpu); 10479 } 10480 10481 /* 10482 * If the guest has used debug registers, at least dr7 10483 * will be disabled while returning to the host. 10484 * If we don't have active breakpoints in the host, we don't 10485 * care about the messed up debug address registers. But if 10486 * we have some of them active, restore the old state. 10487 */ 10488 if (hw_breakpoint_active()) 10489 hw_breakpoint_restore(); 10490 10491 vcpu->arch.last_vmentry_cpu = vcpu->cpu; 10492 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 10493 10494 vcpu->mode = OUTSIDE_GUEST_MODE; 10495 smp_wmb(); 10496 10497 /* 10498 * Sync xfd before calling handle_exit_irqoff() which may 10499 * rely on the fact that guest_fpu::xfd is up-to-date (e.g. 10500 * in #NM irqoff handler). 10501 */ 10502 if (vcpu->arch.xfd_no_write_intercept) 10503 fpu_sync_guest_vmexit_xfd_state(); 10504 10505 static_call(kvm_x86_handle_exit_irqoff)(vcpu); 10506 10507 if (vcpu->arch.guest_fpu.xfd_err) 10508 wrmsrl(MSR_IA32_XFD_ERR, 0); 10509 10510 /* 10511 * Consume any pending interrupts, including the possible source of 10512 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. 10513 * An instruction is required after local_irq_enable() to fully unblock 10514 * interrupts on processors that implement an interrupt shadow, the 10515 * stat.exits increment will do nicely. 10516 */ 10517 kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ); 10518 local_irq_enable(); 10519 ++vcpu->stat.exits; 10520 local_irq_disable(); 10521 kvm_after_interrupt(vcpu); 10522 10523 /* 10524 * Wait until after servicing IRQs to account guest time so that any 10525 * ticks that occurred while running the guest are properly accounted 10526 * to the guest. Waiting until IRQs are enabled degrades the accuracy 10527 * of accounting via context tracking, but the loss of accuracy is 10528 * acceptable for all known use cases. 10529 */ 10530 guest_timing_exit_irqoff(); 10531 10532 local_irq_enable(); 10533 preempt_enable(); 10534 10535 kvm_vcpu_srcu_read_lock(vcpu); 10536 10537 /* 10538 * Profile KVM exit RIPs: 10539 */ 10540 if (unlikely(prof_on == KVM_PROFILING)) { 10541 unsigned long rip = kvm_rip_read(vcpu); 10542 profile_hit(KVM_PROFILING, (void *)rip); 10543 } 10544 10545 if (unlikely(vcpu->arch.tsc_always_catchup)) 10546 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 10547 10548 if (vcpu->arch.apic_attention) 10549 kvm_lapic_sync_from_vapic(vcpu); 10550 10551 r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); 10552 return r; 10553 10554 cancel_injection: 10555 if (req_immediate_exit) 10556 kvm_make_request(KVM_REQ_EVENT, vcpu); 10557 static_call(kvm_x86_cancel_injection)(vcpu); 10558 if (unlikely(vcpu->arch.apic_attention)) 10559 kvm_lapic_sync_from_vapic(vcpu); 10560 out: 10561 return r; 10562 } 10563 10564 /* Called within kvm->srcu read side. */ 10565 static inline int vcpu_block(struct kvm_vcpu *vcpu) 10566 { 10567 bool hv_timer; 10568 10569 if (!kvm_arch_vcpu_runnable(vcpu)) { 10570 /* 10571 * Switch to the software timer before halt-polling/blocking as 10572 * the guest's timer may be a break event for the vCPU, and the 10573 * hypervisor timer runs only when the CPU is in guest mode. 10574 * Switch before halt-polling so that KVM recognizes an expired 10575 * timer before blocking. 
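 * (The switch is undone after blocking via the matching
 * kvm_lapic_switch_to_hv_timer() call below.)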
10576 */ 10577 hv_timer = kvm_lapic_hv_timer_in_use(vcpu); 10578 if (hv_timer) 10579 kvm_lapic_switch_to_sw_timer(vcpu); 10580 10581 kvm_vcpu_srcu_read_unlock(vcpu); 10582 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 10583 kvm_vcpu_halt(vcpu); 10584 else 10585 kvm_vcpu_block(vcpu); 10586 kvm_vcpu_srcu_read_lock(vcpu); 10587 10588 if (hv_timer) 10589 kvm_lapic_switch_to_hv_timer(vcpu); 10590 10591 /* 10592 * If the vCPU is not runnable, a signal or another host event 10593 * of some kind is pending; service it without changing the 10594 * vCPU's activity state. 10595 */ 10596 if (!kvm_arch_vcpu_runnable(vcpu)) 10597 return 1; 10598 } 10599 10600 /* 10601 * Evaluate nested events before exiting the halted state. This allows 10602 * the halt state to be recorded properly in the VMCS12's activity 10603 * state field (AMD does not have a similar field and a VM-Exit always 10604 * causes a spurious wakeup from HLT). 10605 */ 10606 if (is_guest_mode(vcpu)) { 10607 if (kvm_check_nested_events(vcpu) < 0) 10608 return 0; 10609 } 10610 10611 if (kvm_apic_accept_events(vcpu) < 0) 10612 return 0; 10613 switch(vcpu->arch.mp_state) { 10614 case KVM_MP_STATE_HALTED: 10615 case KVM_MP_STATE_AP_RESET_HOLD: 10616 vcpu->arch.pv.pv_unhalted = false; 10617 vcpu->arch.mp_state = 10618 KVM_MP_STATE_RUNNABLE; 10619 fallthrough; 10620 case KVM_MP_STATE_RUNNABLE: 10621 vcpu->arch.apf.halted = false; 10622 break; 10623 case KVM_MP_STATE_INIT_RECEIVED: 10624 break; 10625 default: 10626 WARN_ON_ONCE(1); 10627 break; 10628 } 10629 return 1; 10630 } 10631 10632 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) 10633 { 10634 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 10635 !vcpu->arch.apf.halted); 10636 } 10637 10638 /* Called within kvm->srcu read side. */ 10639 static int vcpu_run(struct kvm_vcpu *vcpu) 10640 { 10641 int r; 10642 10643 vcpu->arch.l1tf_flush_l1d = true; 10644 10645 for (;;) { 10646 /* 10647 * If another guest vCPU requests a PV TLB flush in the middle 10648 * of instruction emulation, the rest of the emulation could 10649 * use a stale page translation. Assume that any code after 10650 * this point can start executing an instruction. 
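 * (Hence the flag below is cleared at the top of every loop iteration,
 * before KVM commits to either entering the guest or blocking.)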
10651 */ 10652 vcpu->arch.at_instruction_boundary = false; 10653 if (kvm_vcpu_running(vcpu)) { 10654 r = vcpu_enter_guest(vcpu); 10655 } else { 10656 r = vcpu_block(vcpu); 10657 } 10658 10659 if (r <= 0) 10660 break; 10661 10662 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu); 10663 if (kvm_xen_has_pending_events(vcpu)) 10664 kvm_xen_inject_pending_events(vcpu); 10665 10666 if (kvm_cpu_has_pending_timer(vcpu)) 10667 kvm_inject_pending_timer_irqs(vcpu); 10668 10669 if (dm_request_for_irq_injection(vcpu) && 10670 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { 10671 r = 0; 10672 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 10673 ++vcpu->stat.request_irq_exits; 10674 break; 10675 } 10676 10677 if (__xfer_to_guest_mode_work_pending()) { 10678 kvm_vcpu_srcu_read_unlock(vcpu); 10679 r = xfer_to_guest_mode_handle_work(vcpu); 10680 kvm_vcpu_srcu_read_lock(vcpu); 10681 if (r) 10682 return r; 10683 } 10684 } 10685 10686 return r; 10687 } 10688 10689 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) 10690 { 10691 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 10692 } 10693 10694 static int complete_emulated_pio(struct kvm_vcpu *vcpu) 10695 { 10696 BUG_ON(!vcpu->arch.pio.count); 10697 10698 return complete_emulated_io(vcpu); 10699 } 10700 10701 /* 10702 * Implements the following, as a state machine: 10703 * 10704 * read: 10705 * for each fragment 10706 * for each mmio piece in the fragment 10707 * write gpa, len 10708 * exit 10709 * copy data 10710 * execute insn 10711 * 10712 * write: 10713 * for each fragment 10714 * for each mmio piece in the fragment 10715 * write gpa, len 10716 * copy data 10717 * exit 10718 */ 10719 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) 10720 { 10721 struct kvm_run *run = vcpu->run; 10722 struct kvm_mmio_fragment *frag; 10723 unsigned len; 10724 10725 BUG_ON(!vcpu->mmio_needed); 10726 10727 /* Complete previous fragment */ 10728 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 10729 len = min(8u, frag->len); 10730 if (!vcpu->mmio_is_write) 10731 memcpy(frag->data, run->mmio.data, len); 10732 10733 if (frag->len <= 8) { 10734 /* Switch to the next fragment. */ 10735 frag++; 10736 vcpu->mmio_cur_fragment++; 10737 } else { 10738 /* Go forward to the next mmio piece. */ 10739 frag->data += len; 10740 frag->gpa += len; 10741 frag->len -= len; 10742 } 10743 10744 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 10745 vcpu->mmio_needed = 0; 10746 10747 /* FIXME: return into emulator if single-stepping. */ 10748 if (vcpu->mmio_is_write) 10749 return 1; 10750 vcpu->mmio_read_completed = 1; 10751 return complete_emulated_io(vcpu); 10752 } 10753 10754 run->exit_reason = KVM_EXIT_MMIO; 10755 run->mmio.phys_addr = frag->gpa; 10756 if (vcpu->mmio_is_write) 10757 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 10758 run->mmio.len = min(8u, frag->len); 10759 run->mmio.is_write = vcpu->mmio_is_write; 10760 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 10761 return 0; 10762 } 10763 10764 /* Swap (qemu) user FPU context for the guest FPU context. */ 10765 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 10766 { 10767 /* Exclude PKRU, it's restored separately immediately after VM-Exit. */ 10768 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true); 10769 trace_kvm_fpu(1); 10770 } 10771 10772 /* When vcpu_run ends, restore user space FPU context. 
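 * (Mirror of kvm_load_guest_fpu() above; both directions go through
 * fpu_swap_kvm_fpstate().)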
*/ 10773 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 10774 { 10775 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false); 10776 ++vcpu->stat.fpu_reload; 10777 trace_kvm_fpu(0); 10778 } 10779 10780 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) 10781 { 10782 struct kvm_queued_exception *ex = &vcpu->arch.exception; 10783 struct kvm_run *kvm_run = vcpu->run; 10784 int r; 10785 10786 vcpu_load(vcpu); 10787 kvm_sigset_activate(vcpu); 10788 kvm_run->flags = 0; 10789 kvm_load_guest_fpu(vcpu); 10790 10791 kvm_vcpu_srcu_read_lock(vcpu); 10792 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { 10793 if (kvm_run->immediate_exit) { 10794 r = -EINTR; 10795 goto out; 10796 } 10797 /* 10798 * It should be impossible for the hypervisor timer to be in 10799 * use before KVM has ever run the vCPU. 10800 */ 10801 WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu)); 10802 10803 kvm_vcpu_srcu_read_unlock(vcpu); 10804 kvm_vcpu_block(vcpu); 10805 kvm_vcpu_srcu_read_lock(vcpu); 10806 10807 if (kvm_apic_accept_events(vcpu) < 0) { 10808 r = 0; 10809 goto out; 10810 } 10811 r = -EAGAIN; 10812 if (signal_pending(current)) { 10813 r = -EINTR; 10814 kvm_run->exit_reason = KVM_EXIT_INTR; 10815 ++vcpu->stat.signal_exits; 10816 } 10817 goto out; 10818 } 10819 10820 if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) || 10821 (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) { 10822 r = -EINVAL; 10823 goto out; 10824 } 10825 10826 if (kvm_run->kvm_dirty_regs) { 10827 r = sync_regs(vcpu); 10828 if (r != 0) 10829 goto out; 10830 } 10831 10832 /* re-sync apic's tpr */ 10833 if (!lapic_in_kernel(vcpu)) { 10834 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { 10835 r = -EINVAL; 10836 goto out; 10837 } 10838 } 10839 10840 /* 10841 * If userspace set a pending exception and L2 is active, convert it to 10842 * a pending VM-Exit if L1 wants to intercept the exception. 10843 */ 10844 if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) && 10845 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector, 10846 ex->error_code)) { 10847 kvm_queue_exception_vmexit(vcpu, ex->vector, 10848 ex->has_error_code, ex->error_code, 10849 ex->has_payload, ex->payload); 10850 ex->injected = false; 10851 ex->pending = false; 10852 } 10853 vcpu->arch.exception_from_userspace = false; 10854 10855 if (unlikely(vcpu->arch.complete_userspace_io)) { 10856 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; 10857 vcpu->arch.complete_userspace_io = NULL; 10858 r = cui(vcpu); 10859 if (r <= 0) 10860 goto out; 10861 } else { 10862 WARN_ON_ONCE(vcpu->arch.pio.count); 10863 WARN_ON_ONCE(vcpu->mmio_needed); 10864 } 10865 10866 if (kvm_run->immediate_exit) { 10867 r = -EINTR; 10868 goto out; 10869 } 10870 10871 r = static_call(kvm_x86_vcpu_pre_run)(vcpu); 10872 if (r <= 0) 10873 goto out; 10874 10875 r = vcpu_run(vcpu); 10876 10877 out: 10878 kvm_put_guest_fpu(vcpu); 10879 if (kvm_run->kvm_valid_regs) 10880 store_regs(vcpu); 10881 post_kvm_run_save(vcpu); 10882 kvm_vcpu_srcu_read_unlock(vcpu); 10883 10884 kvm_sigset_deactivate(vcpu); 10885 vcpu_put(vcpu); 10886 return r; 10887 } 10888 10889 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10890 { 10891 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { 10892 /* 10893 * We are here if userspace calls get_regs() in the middle of 10894 * instruction emulation. Registers state needs to be copied 10895 * back from emulation context to vcpu. 
Userspace shouldn't do 10896 * that usually, but some badly designed PV devices (vmware 10897 * backdoor interface) need this to work. 10898 */ 10899 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); 10900 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 10901 } 10902 regs->rax = kvm_rax_read(vcpu); 10903 regs->rbx = kvm_rbx_read(vcpu); 10904 regs->rcx = kvm_rcx_read(vcpu); 10905 regs->rdx = kvm_rdx_read(vcpu); 10906 regs->rsi = kvm_rsi_read(vcpu); 10907 regs->rdi = kvm_rdi_read(vcpu); 10908 regs->rsp = kvm_rsp_read(vcpu); 10909 regs->rbp = kvm_rbp_read(vcpu); 10910 #ifdef CONFIG_X86_64 10911 regs->r8 = kvm_r8_read(vcpu); 10912 regs->r9 = kvm_r9_read(vcpu); 10913 regs->r10 = kvm_r10_read(vcpu); 10914 regs->r11 = kvm_r11_read(vcpu); 10915 regs->r12 = kvm_r12_read(vcpu); 10916 regs->r13 = kvm_r13_read(vcpu); 10917 regs->r14 = kvm_r14_read(vcpu); 10918 regs->r15 = kvm_r15_read(vcpu); 10919 #endif 10920 10921 regs->rip = kvm_rip_read(vcpu); 10922 regs->rflags = kvm_get_rflags(vcpu); 10923 } 10924 10925 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10926 { 10927 vcpu_load(vcpu); 10928 __get_regs(vcpu, regs); 10929 vcpu_put(vcpu); 10930 return 0; 10931 } 10932 10933 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10934 { 10935 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; 10936 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 10937 10938 kvm_rax_write(vcpu, regs->rax); 10939 kvm_rbx_write(vcpu, regs->rbx); 10940 kvm_rcx_write(vcpu, regs->rcx); 10941 kvm_rdx_write(vcpu, regs->rdx); 10942 kvm_rsi_write(vcpu, regs->rsi); 10943 kvm_rdi_write(vcpu, regs->rdi); 10944 kvm_rsp_write(vcpu, regs->rsp); 10945 kvm_rbp_write(vcpu, regs->rbp); 10946 #ifdef CONFIG_X86_64 10947 kvm_r8_write(vcpu, regs->r8); 10948 kvm_r9_write(vcpu, regs->r9); 10949 kvm_r10_write(vcpu, regs->r10); 10950 kvm_r11_write(vcpu, regs->r11); 10951 kvm_r12_write(vcpu, regs->r12); 10952 kvm_r13_write(vcpu, regs->r13); 10953 kvm_r14_write(vcpu, regs->r14); 10954 kvm_r15_write(vcpu, regs->r15); 10955 #endif 10956 10957 kvm_rip_write(vcpu, regs->rip); 10958 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); 10959 10960 vcpu->arch.exception.pending = false; 10961 vcpu->arch.exception_vmexit.pending = false; 10962 10963 kvm_make_request(KVM_REQ_EVENT, vcpu); 10964 } 10965 10966 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10967 { 10968 vcpu_load(vcpu); 10969 __set_regs(vcpu, regs); 10970 vcpu_put(vcpu); 10971 return 0; 10972 } 10973 10974 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 10975 { 10976 struct desc_ptr dt; 10977 10978 if (vcpu->arch.guest_state_protected) 10979 goto skip_protected_regs; 10980 10981 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 10982 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 10983 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); 10984 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 10985 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 10986 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 10987 10988 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 10989 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 10990 10991 static_call(kvm_x86_get_idt)(vcpu, &dt); 10992 sregs->idt.limit = dt.size; 10993 sregs->idt.base = dt.address; 10994 static_call(kvm_x86_get_gdt)(vcpu, &dt); 10995 sregs->gdt.limit = dt.size; 10996 sregs->gdt.base = dt.address; 10997 10998 sregs->cr2 = vcpu->arch.cr2; 10999 sregs->cr3 = kvm_read_cr3(vcpu); 11000 11001 skip_protected_regs: 11002
sregs->cr0 = kvm_read_cr0(vcpu); 11003 sregs->cr4 = kvm_read_cr4(vcpu); 11004 sregs->cr8 = kvm_get_cr8(vcpu); 11005 sregs->efer = vcpu->arch.efer; 11006 sregs->apic_base = kvm_get_apic_base(vcpu); 11007 } 11008 11009 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11010 { 11011 __get_sregs_common(vcpu, sregs); 11012 11013 if (vcpu->arch.guest_state_protected) 11014 return; 11015 11016 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) 11017 set_bit(vcpu->arch.interrupt.nr, 11018 (unsigned long *)sregs->interrupt_bitmap); 11019 } 11020 11021 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 11022 { 11023 int i; 11024 11025 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2); 11026 11027 if (vcpu->arch.guest_state_protected) 11028 return; 11029 11030 if (is_pae_paging(vcpu)) { 11031 for (i = 0 ; i < 4 ; i++) 11032 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); 11033 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; 11034 } 11035 } 11036 11037 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 11038 struct kvm_sregs *sregs) 11039 { 11040 vcpu_load(vcpu); 11041 __get_sregs(vcpu, sregs); 11042 vcpu_put(vcpu); 11043 return 0; 11044 } 11045 11046 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 11047 struct kvm_mp_state *mp_state) 11048 { 11049 int r; 11050 11051 vcpu_load(vcpu); 11052 if (kvm_mpx_supported()) 11053 kvm_load_guest_fpu(vcpu); 11054 11055 r = kvm_apic_accept_events(vcpu); 11056 if (r < 0) 11057 goto out; 11058 r = 0; 11059 11060 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || 11061 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && 11062 vcpu->arch.pv.pv_unhalted) 11063 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; 11064 else 11065 mp_state->mp_state = vcpu->arch.mp_state; 11066 11067 out: 11068 if (kvm_mpx_supported()) 11069 kvm_put_guest_fpu(vcpu); 11070 vcpu_put(vcpu); 11071 return r; 11072 } 11073 11074 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 11075 struct kvm_mp_state *mp_state) 11076 { 11077 int ret = -EINVAL; 11078 11079 vcpu_load(vcpu); 11080 11081 switch (mp_state->mp_state) { 11082 case KVM_MP_STATE_UNINITIALIZED: 11083 case KVM_MP_STATE_HALTED: 11084 case KVM_MP_STATE_AP_RESET_HOLD: 11085 case KVM_MP_STATE_INIT_RECEIVED: 11086 case KVM_MP_STATE_SIPI_RECEIVED: 11087 if (!lapic_in_kernel(vcpu)) 11088 goto out; 11089 break; 11090 11091 case KVM_MP_STATE_RUNNABLE: 11092 break; 11093 11094 default: 11095 goto out; 11096 } 11097 11098 /* 11099 * Pending INITs are reported using KVM_SET_VCPU_EVENTS, disallow 11100 * forcing the guest into INIT/SIPI if those events are supposed to be 11101 * blocked. KVM prioritizes SMI over INIT, so reject INIT/SIPI state 11102 * if an SMI is pending as well. 
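 * (INIT/SIPI are blocked, for example, while the vCPU is in SMM; see
 * kvm_apic_init_sipi_allowed().)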
11103 */ 11104 if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) && 11105 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || 11106 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) 11107 goto out; 11108 11109 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 11110 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 11111 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); 11112 } else 11113 vcpu->arch.mp_state = mp_state->mp_state; 11114 kvm_make_request(KVM_REQ_EVENT, vcpu); 11115 11116 ret = 0; 11117 out: 11118 vcpu_put(vcpu); 11119 return ret; 11120 } 11121 11122 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, 11123 int reason, bool has_error_code, u32 error_code) 11124 { 11125 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 11126 int ret; 11127 11128 init_emulate_ctxt(vcpu); 11129 11130 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, 11131 has_error_code, error_code); 11132 if (ret) { 11133 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 11134 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 11135 vcpu->run->internal.ndata = 0; 11136 return 0; 11137 } 11138 11139 kvm_rip_write(vcpu, ctxt->eip); 11140 kvm_set_rflags(vcpu, ctxt->eflags); 11141 return 1; 11142 } 11143 EXPORT_SYMBOL_GPL(kvm_task_switch); 11144 11145 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11146 { 11147 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { 11148 /* 11149 * When EFER.LME and CR0.PG are set, the processor is in 11150 * 64-bit mode (though maybe in a 32-bit code segment). 11151 * CR4.PAE and EFER.LMA must be set. 11152 */ 11153 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) 11154 return false; 11155 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) 11156 return false; 11157 } else { 11158 /* 11159 * Not in 64-bit mode: EFER.LMA is clear and the code 11160 * segment cannot be 64-bit. 
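 * (E.g. userspace setting EFER.LMA without EFER.LME/CR0.PG, or a 64-bit
 * code segment (CS.L = 1) outside long mode, is rejected with -EINVAL.)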
11161 */ 11162 if (sregs->efer & EFER_LMA || sregs->cs.l) 11163 return false; 11164 } 11165 11166 return kvm_is_valid_cr4(vcpu, sregs->cr4); 11167 } 11168 11169 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, 11170 int *mmu_reset_needed, bool update_pdptrs) 11171 { 11172 struct msr_data apic_base_msr; 11173 int idx; 11174 struct desc_ptr dt; 11175 11176 if (!kvm_is_valid_sregs(vcpu, sregs)) 11177 return -EINVAL; 11178 11179 apic_base_msr.data = sregs->apic_base; 11180 apic_base_msr.host_initiated = true; 11181 if (kvm_set_apic_base(vcpu, &apic_base_msr)) 11182 return -EINVAL; 11183 11184 if (vcpu->arch.guest_state_protected) 11185 return 0; 11186 11187 dt.size = sregs->idt.limit; 11188 dt.address = sregs->idt.base; 11189 static_call(kvm_x86_set_idt)(vcpu, &dt); 11190 dt.size = sregs->gdt.limit; 11191 dt.address = sregs->gdt.base; 11192 static_call(kvm_x86_set_gdt)(vcpu, &dt); 11193 11194 vcpu->arch.cr2 = sregs->cr2; 11195 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; 11196 vcpu->arch.cr3 = sregs->cr3; 11197 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 11198 static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); 11199 11200 kvm_set_cr8(vcpu, sregs->cr8); 11201 11202 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 11203 static_call(kvm_x86_set_efer)(vcpu, sregs->efer); 11204 11205 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 11206 static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); 11207 vcpu->arch.cr0 = sregs->cr0; 11208 11209 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 11210 static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); 11211 11212 if (update_pdptrs) { 11213 idx = srcu_read_lock(&vcpu->kvm->srcu); 11214 if (is_pae_paging(vcpu)) { 11215 load_pdptrs(vcpu, kvm_read_cr3(vcpu)); 11216 *mmu_reset_needed = 1; 11217 } 11218 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11219 } 11220 11221 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 11222 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 11223 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); 11224 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 11225 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 11226 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 11227 11228 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 11229 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 11230 11231 update_cr8_intercept(vcpu); 11232 11233 /* Older userspace won't unhalt the vcpu on reset. 
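 * (The check below matches the architectural reset state: BSP at
 * CS 0xf000, base 0xffff0000, RIP 0xfff0, in real mode.)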
*/ 11234 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && 11235 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 11236 !is_protmode(vcpu)) 11237 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 11238 11239 return 0; 11240 } 11241 11242 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11243 { 11244 int pending_vec, max_bits; 11245 int mmu_reset_needed = 0; 11246 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true); 11247 11248 if (ret) 11249 return ret; 11250 11251 if (mmu_reset_needed) 11252 kvm_mmu_reset_context(vcpu); 11253 11254 max_bits = KVM_NR_INTERRUPTS; 11255 pending_vec = find_first_bit( 11256 (const unsigned long *)sregs->interrupt_bitmap, max_bits); 11257 11258 if (pending_vec < max_bits) { 11259 kvm_queue_interrupt(vcpu, pending_vec, false); 11260 pr_debug("Set back pending irq %d\n", pending_vec); 11261 kvm_make_request(KVM_REQ_EVENT, vcpu); 11262 } 11263 return 0; 11264 } 11265 11266 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 11267 { 11268 int mmu_reset_needed = 0; 11269 bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; 11270 bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) && 11271 !(sregs2->efer & EFER_LMA); 11272 int i, ret; 11273 11274 if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID) 11275 return -EINVAL; 11276 11277 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) 11278 return -EINVAL; 11279 11280 ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2, 11281 &mmu_reset_needed, !valid_pdptrs); 11282 if (ret) 11283 return ret; 11284 11285 if (valid_pdptrs) { 11286 for (i = 0; i < 4 ; i++) 11287 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]); 11288 11289 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); 11290 mmu_reset_needed = 1; 11291 vcpu->arch.pdptrs_from_userspace = true; 11292 } 11293 if (mmu_reset_needed) 11294 kvm_mmu_reset_context(vcpu); 11295 return 0; 11296 } 11297 11298 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 11299 struct kvm_sregs *sregs) 11300 { 11301 int ret; 11302 11303 vcpu_load(vcpu); 11304 ret = __set_sregs(vcpu, sregs); 11305 vcpu_put(vcpu); 11306 return ret; 11307 } 11308 11309 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm) 11310 { 11311 bool set = false; 11312 struct kvm_vcpu *vcpu; 11313 unsigned long i; 11314 11315 if (!enable_apicv) 11316 return; 11317 11318 down_write(&kvm->arch.apicv_update_lock); 11319 11320 kvm_for_each_vcpu(i, vcpu, kvm) { 11321 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) { 11322 set = true; 11323 break; 11324 } 11325 } 11326 __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set); 11327 up_write(&kvm->arch.apicv_update_lock); 11328 } 11329 11330 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 11331 struct kvm_guest_debug *dbg) 11332 { 11333 unsigned long rflags; 11334 int i, r; 11335 11336 if (vcpu->arch.guest_state_protected) 11337 return -EINVAL; 11338 11339 vcpu_load(vcpu); 11340 11341 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { 11342 r = -EBUSY; 11343 if (kvm_is_exception_pending(vcpu)) 11344 goto out; 11345 if (dbg->control & KVM_GUESTDBG_INJECT_DB) 11346 kvm_queue_exception(vcpu, DB_VECTOR); 11347 else 11348 kvm_queue_exception(vcpu, BP_VECTOR); 11349 } 11350 11351 /* 11352 * Read rflags as long as potentially injected trace flags are still 11353 * filtered out. 
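 * (kvm_get_rflags() masks out a single-step TF that KVM itself injected, so
 * reading it before updating vcpu->guest_debug keeps the saved value clean.)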
11354 */ 11355 rflags = kvm_get_rflags(vcpu); 11356 11357 vcpu->guest_debug = dbg->control; 11358 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) 11359 vcpu->guest_debug = 0; 11360 11361 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 11362 for (i = 0; i < KVM_NR_DB_REGS; ++i) 11363 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; 11364 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; 11365 } else { 11366 for (i = 0; i < KVM_NR_DB_REGS; i++) 11367 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 11368 } 11369 kvm_update_dr7(vcpu); 11370 11371 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 11372 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); 11373 11374 /* 11375 * Trigger an rflags update that will inject or remove the trace 11376 * flags. 11377 */ 11378 kvm_set_rflags(vcpu, rflags); 11379 11380 static_call(kvm_x86_update_exception_bitmap)(vcpu); 11381 11382 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm); 11383 11384 r = 0; 11385 11386 out: 11387 vcpu_put(vcpu); 11388 return r; 11389 } 11390 11391 /* 11392 * Translate a guest virtual address to a guest physical address. 11393 */ 11394 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 11395 struct kvm_translation *tr) 11396 { 11397 unsigned long vaddr = tr->linear_address; 11398 gpa_t gpa; 11399 int idx; 11400 11401 vcpu_load(vcpu); 11402 11403 idx = srcu_read_lock(&vcpu->kvm->srcu); 11404 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); 11405 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11406 tr->physical_address = gpa; 11407 tr->valid = gpa != INVALID_GPA; 11408 tr->writeable = 1; 11409 tr->usermode = 0; 11410 11411 vcpu_put(vcpu); 11412 return 0; 11413 } 11414 11415 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 11416 { 11417 struct fxregs_state *fxsave; 11418 11419 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 11420 return 0; 11421 11422 vcpu_load(vcpu); 11423 11424 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; 11425 memcpy(fpu->fpr, fxsave->st_space, 128); 11426 fpu->fcw = fxsave->cwd; 11427 fpu->fsw = fxsave->swd; 11428 fpu->ftwx = fxsave->twd; 11429 fpu->last_opcode = fxsave->fop; 11430 fpu->last_ip = fxsave->rip; 11431 fpu->last_dp = fxsave->rdp; 11432 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); 11433 11434 vcpu_put(vcpu); 11435 return 0; 11436 } 11437 11438 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 11439 { 11440 struct fxregs_state *fxsave; 11441 11442 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 11443 return 0; 11444 11445 vcpu_load(vcpu); 11446 11447 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; 11448 11449 memcpy(fxsave->st_space, fpu->fpr, 128); 11450 fxsave->cwd = fpu->fcw; 11451 fxsave->swd = fpu->fsw; 11452 fxsave->twd = fpu->ftwx; 11453 fxsave->fop = fpu->last_opcode; 11454 fxsave->rip = fpu->last_ip; 11455 fxsave->rdp = fpu->last_dp; 11456 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); 11457 11458 vcpu_put(vcpu); 11459 return 0; 11460 } 11461 11462 static void store_regs(struct kvm_vcpu *vcpu) 11463 { 11464 BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES); 11465 11466 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) 11467 __get_regs(vcpu, &vcpu->run->s.regs.regs); 11468 11469 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) 11470 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); 11471 11472 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) 11473 kvm_vcpu_ioctl_x86_get_vcpu_events( 11474 vcpu, &vcpu->run->s.regs.events); 11475 } 11476 11477 static int 
sync_regs(struct kvm_vcpu *vcpu) 11478 { 11479 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { 11480 __set_regs(vcpu, &vcpu->run->s.regs.regs); 11481 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; 11482 } 11483 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { 11484 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) 11485 return -EINVAL; 11486 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; 11487 } 11488 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { 11489 if (kvm_vcpu_ioctl_x86_set_vcpu_events( 11490 vcpu, &vcpu->run->s.regs.events)) 11491 return -EINVAL; 11492 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; 11493 } 11494 11495 return 0; 11496 } 11497 11498 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) 11499 { 11500 if (kvm_check_tsc_unstable() && kvm->created_vcpus) 11501 pr_warn_once("kvm: SMP vm created on host with unstable TSC; " 11502 "guest TSC will not be reliable\n"); 11503 11504 if (!kvm->arch.max_vcpu_ids) 11505 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS; 11506 11507 if (id >= kvm->arch.max_vcpu_ids) 11508 return -EINVAL; 11509 11510 return static_call(kvm_x86_vcpu_precreate)(kvm); 11511 } 11512 11513 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) 11514 { 11515 struct page *page; 11516 int r; 11517 11518 vcpu->arch.last_vmentry_cpu = -1; 11519 vcpu->arch.regs_avail = ~0; 11520 vcpu->arch.regs_dirty = ~0; 11521 11522 kvm_gpc_init(&vcpu->arch.pv_time); 11523 11524 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) 11525 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 11526 else 11527 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; 11528 11529 r = kvm_mmu_create(vcpu); 11530 if (r < 0) 11531 return r; 11532 11533 if (irqchip_in_kernel(vcpu->kvm)) { 11534 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); 11535 if (r < 0) 11536 goto fail_mmu_destroy; 11537 11538 /* 11539 * Defer evaluating inhibits until the vCPU is first run, as 11540 * this vCPU will not get notified of any changes until this 11541 * vCPU is visible to other vCPUs (marked online and added to 11542 * the set of vCPUs). Opportunistically mark APICv active as 11543 * VMX in particular is highly unlikely to have inhibits. 11544 * Ignore the current per-VM APICv state so that vCPU creation 11545 * is guaranteed to run with a deterministic value; the request 11546 * will ensure the vCPU gets the correct state before VM-Entry.
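 * (The KVM_REQ_APICV_UPDATE request made below reconciles this optimistic
 * "active" value with the VM-wide inhibit state before first VM-Entry.)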
11547 */ 11548 if (enable_apicv) { 11549 vcpu->arch.apic->apicv_active = true; 11550 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); 11551 } 11552 } else 11553 static_branch_inc(&kvm_has_noapic_vcpu); 11554 11555 r = -ENOMEM; 11556 11557 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 11558 if (!page) 11559 goto fail_free_lapic; 11560 vcpu->arch.pio_data = page_address(page); 11561 11562 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64), 11563 GFP_KERNEL_ACCOUNT); 11564 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64), 11565 GFP_KERNEL_ACCOUNT); 11566 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks) 11567 goto fail_free_mce_banks; 11568 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; 11569 11570 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, 11571 GFP_KERNEL_ACCOUNT)) 11572 goto fail_free_mce_banks; 11573 11574 if (!alloc_emulate_ctxt(vcpu)) 11575 goto free_wbinvd_dirty_mask; 11576 11577 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { 11578 pr_err("kvm: failed to allocate vcpu's fpu\n"); 11579 goto free_emulate_ctxt; 11580 } 11581 11582 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); 11583 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); 11584 11585 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; 11586 11587 kvm_async_pf_hash_reset(vcpu); 11588 11589 vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap; 11590 kvm_pmu_init(vcpu); 11591 11592 vcpu->arch.pending_external_vector = -1; 11593 vcpu->arch.preempted_in_kernel = false; 11594 11595 #if IS_ENABLED(CONFIG_HYPERV) 11596 vcpu->arch.hv_root_tdp = INVALID_PAGE; 11597 #endif 11598 11599 r = static_call(kvm_x86_vcpu_create)(vcpu); 11600 if (r) 11601 goto free_guest_fpu; 11602 11603 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); 11604 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; 11605 kvm_xen_init_vcpu(vcpu); 11606 kvm_vcpu_mtrr_init(vcpu); 11607 vcpu_load(vcpu); 11608 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz); 11609 kvm_vcpu_reset(vcpu, false); 11610 kvm_init_mmu(vcpu); 11611 vcpu_put(vcpu); 11612 return 0; 11613 11614 free_guest_fpu: 11615 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); 11616 free_emulate_ctxt: 11617 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 11618 free_wbinvd_dirty_mask: 11619 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 11620 fail_free_mce_banks: 11621 kfree(vcpu->arch.mce_banks); 11622 kfree(vcpu->arch.mci_ctl2_banks); 11623 free_page((unsigned long)vcpu->arch.pio_data); 11624 fail_free_lapic: 11625 kvm_free_lapic(vcpu); 11626 fail_mmu_destroy: 11627 kvm_mmu_destroy(vcpu); 11628 return r; 11629 } 11630 11631 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 11632 { 11633 struct kvm *kvm = vcpu->kvm; 11634 11635 if (mutex_lock_killable(&vcpu->mutex)) 11636 return; 11637 vcpu_load(vcpu); 11638 kvm_synchronize_tsc(vcpu, 0); 11639 vcpu_put(vcpu); 11640 11641 /* poll control enabled by default */ 11642 vcpu->arch.msr_kvm_poll_control = 1; 11643 11644 mutex_unlock(&vcpu->mutex); 11645 11646 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) 11647 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 11648 KVMCLOCK_SYNC_PERIOD); 11649 } 11650 11651 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 11652 { 11653 int idx; 11654 11655 kvmclock_reset(vcpu); 11656 11657 static_call(kvm_x86_vcpu_free)(vcpu); 11658 11659 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 11660 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 11661 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); 11662 
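	/*
	 * The teardown below roughly mirrors, in reverse, the allocations
	 * made by kvm_arch_vcpu_create().  Note, kvm_mmu_destroy() runs
	 * inside an SRCU read-side section because freeing MMU state can
	 * dereference SRCU-protected memslot data.
	 */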
11663 kvm_xen_destroy_vcpu(vcpu); 11664 kvm_hv_vcpu_uninit(vcpu); 11665 kvm_pmu_destroy(vcpu); 11666 kfree(vcpu->arch.mce_banks); 11667 kfree(vcpu->arch.mci_ctl2_banks); 11668 kvm_free_lapic(vcpu); 11669 idx = srcu_read_lock(&vcpu->kvm->srcu); 11670 kvm_mmu_destroy(vcpu); 11671 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11672 free_page((unsigned long)vcpu->arch.pio_data); 11673 kvfree(vcpu->arch.cpuid_entries); 11674 if (!lapic_in_kernel(vcpu)) 11675 static_branch_dec(&kvm_has_noapic_vcpu); 11676 } 11677 11678 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 11679 { 11680 struct kvm_cpuid_entry2 *cpuid_0x1; 11681 unsigned long old_cr0 = kvm_read_cr0(vcpu); 11682 unsigned long new_cr0; 11683 11684 /* 11685 * Several of the "set" flows, e.g. ->set_cr0(), read other registers 11686 * to handle side effects. RESET emulation hits those flows and relies 11687 * on emulated/virtualized registers, including those that are loaded 11688 * into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel 11689 * to detect improper or missing initialization. 11690 */ 11691 WARN_ON_ONCE(!init_event && 11692 (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu))); 11693 11694 /* 11695 * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's 11696 * possible to INIT the vCPU while L2 is active. Force the vCPU back 11697 * into L1 as EFER.SVME is cleared on INIT (along with all other EFER 11698 * bits), i.e. virtualization is disabled. 11699 */ 11700 if (is_guest_mode(vcpu)) 11701 kvm_leave_nested(vcpu); 11702 11703 kvm_lapic_reset(vcpu, init_event); 11704 11705 WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu)); 11706 vcpu->arch.hflags = 0; 11707 11708 vcpu->arch.smi_pending = 0; 11709 vcpu->arch.smi_count = 0; 11710 atomic_set(&vcpu->arch.nmi_queued, 0); 11711 vcpu->arch.nmi_pending = 0; 11712 vcpu->arch.nmi_injected = false; 11713 kvm_clear_interrupt_queue(vcpu); 11714 kvm_clear_exception_queue(vcpu); 11715 11716 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); 11717 kvm_update_dr0123(vcpu); 11718 vcpu->arch.dr6 = DR6_ACTIVE_LOW; 11719 vcpu->arch.dr7 = DR7_FIXED_1; 11720 kvm_update_dr7(vcpu); 11721 11722 vcpu->arch.cr2 = 0; 11723 11724 kvm_make_request(KVM_REQ_EVENT, vcpu); 11725 vcpu->arch.apf.msr_en_val = 0; 11726 vcpu->arch.apf.msr_int_val = 0; 11727 vcpu->arch.st.msr_val = 0; 11728 11729 kvmclock_reset(vcpu); 11730 11731 kvm_clear_async_pf_completion_queue(vcpu); 11732 kvm_async_pf_hash_reset(vcpu); 11733 vcpu->arch.apf.halted = false; 11734 11735 if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) { 11736 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate; 11737 11738 /* 11739 * All paths that lead to INIT are required to load the guest's 11740 * FPU state (because most paths are buried in KVM_RUN). 11741 */ 11742 if (init_event) 11743 kvm_put_guest_fpu(vcpu); 11744 11745 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS); 11746 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR); 11747 11748 if (init_event) 11749 kvm_load_guest_fpu(vcpu); 11750 } 11751 11752 if (!init_event) { 11753 kvm_pmu_reset(vcpu); 11754 vcpu->arch.smbase = 0x30000; 11755 11756 vcpu->arch.msr_misc_features_enables = 0; 11757 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | 11758 MSR_IA32_MISC_ENABLE_BTS_UNAVAIL; 11759 11760 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP); 11761 __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true); 11762 } 11763 11764 /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. 
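 * (RDX is the exception because it holds the reset signature, i.e. the
 * Family/Model/Stepping value taken from CPUID.0x1.EAX; see the
 * kvm_rdx_write() below.)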
*/ 11765 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); 11766 kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP); 11767 11768 /* 11769 * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon) 11770 * if no CPUID match is found. Note, it's impossible to get a match at 11771 * RESET since KVM emulates RESET before exposing the vCPU to userspace, 11772 * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry 11773 * on RESET. But, go through the motions in case that's ever remedied. 11774 */ 11775 cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1); 11776 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); 11777 11778 static_call(kvm_x86_vcpu_reset)(vcpu, init_event); 11779 11780 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 11781 kvm_rip_write(vcpu, 0xfff0); 11782 11783 vcpu->arch.cr3 = 0; 11784 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 11785 11786 /* 11787 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions 11788 * of Intel's SDM list CD/NW as being set on INIT, but they contradict 11789 * (or qualify) that with a footnote stating that CD/NW are preserved. 11790 */ 11791 new_cr0 = X86_CR0_ET; 11792 if (init_event) 11793 new_cr0 |= (old_cr0 & (X86_CR0_NW | X86_CR0_CD)); 11794 else 11795 new_cr0 |= X86_CR0_NW | X86_CR0_CD; 11796 11797 static_call(kvm_x86_set_cr0)(vcpu, new_cr0); 11798 static_call(kvm_x86_set_cr4)(vcpu, 0); 11799 static_call(kvm_x86_set_efer)(vcpu, 0); 11800 static_call(kvm_x86_update_exception_bitmap)(vcpu); 11801 11802 /* 11803 * On the standard CR0/CR4/EFER modification paths, there are several 11804 * complex conditions determining whether the MMU has to be reset and/or 11805 * which PCIDs have to be flushed. However, CR0.WP and the paging-related 11806 * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush 11807 * is needed anyway if CR0.PG was '1' (which can only happen for INIT, as 11808 * CR0 will be '0' prior to RESET). So we only need to check CR0.PG here. 11809 */ 11810 if (old_cr0 & X86_CR0_PG) { 11811 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 11812 kvm_mmu_reset_context(vcpu); 11813 } 11814 11815 /* 11816 * Intel's SDM states that all TLB entries are flushed on INIT. AMD's 11817 * APM states the TLBs are untouched by INIT, but it also states that 11818 * the TLBs are flushed on "External initialization of the processor." 11819 * Flush the guest TLB regardless of vendor, there is no meaningful 11820 * benefit in relying on the guest to flush the TLB immediately after 11821 * INIT. A spurious TLB flush is benign and likely negligible from a 11822 * performance perspective. 
11823 */ 11824 if (init_event) 11825 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 11826 } 11827 EXPORT_SYMBOL_GPL(kvm_vcpu_reset); 11828 11829 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 11830 { 11831 struct kvm_segment cs; 11832 11833 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 11834 cs.selector = vector << 8; 11835 cs.base = vector << 12; 11836 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 11837 kvm_rip_write(vcpu, 0); 11838 } 11839 EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector); 11840 11841 int kvm_arch_hardware_enable(void) 11842 { 11843 struct kvm *kvm; 11844 struct kvm_vcpu *vcpu; 11845 unsigned long i; 11846 int ret; 11847 u64 local_tsc; 11848 u64 max_tsc = 0; 11849 bool stable, backwards_tsc = false; 11850 11851 kvm_user_return_msr_cpu_online(); 11852 ret = static_call(kvm_x86_hardware_enable)(); 11853 if (ret != 0) 11854 return ret; 11855 11856 local_tsc = rdtsc(); 11857 stable = !kvm_check_tsc_unstable(); 11858 list_for_each_entry(kvm, &vm_list, vm_list) { 11859 kvm_for_each_vcpu(i, vcpu, kvm) { 11860 if (!stable && vcpu->cpu == smp_processor_id()) 11861 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 11862 if (stable && vcpu->arch.last_host_tsc > local_tsc) { 11863 backwards_tsc = true; 11864 if (vcpu->arch.last_host_tsc > max_tsc) 11865 max_tsc = vcpu->arch.last_host_tsc; 11866 } 11867 } 11868 } 11869 11870 /* 11871 * Sometimes, even reliable TSCs go backwards. This happens on 11872 * platforms that reset TSC during suspend or hibernate actions, but 11873 * maintain synchronization. We must compensate. Fortunately, we can 11874 * detect that condition here, which happens early in CPU bringup, 11875 * before any KVM threads can be running. Unfortunately, we can't 11876 * bring the TSCs fully up to date with real time, as we aren't yet far 11877 * enough into CPU bringup that we know how much real time has actually 11878 * elapsed; our helper function, ktime_get_boottime_ns(), will be using boot 11879 * variables that haven't been updated yet. 11880 * 11881 * So we simply find the maximum observed TSC above, then record the 11882 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, 11883 * the adjustment will be applied. Note that we accumulate 11884 * adjustments, in case multiple suspend cycles happen before some VCPU 11885 * gets a chance to run again. In the event that no KVM threads get a 11886 * chance to run, we will miss the entire elapsed period, as we'll have 11887 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may 11888 * lose cycle time. This isn't too big a deal, since the loss will be 11889 * uniform across all VCPUs (not to mention the scenario is extremely 11890 * unlikely). It is possible that a second hibernate recovery happens 11891 * much faster than a first, causing the observed TSC here to be 11892 * smaller; this would require additional padding adjustment, which is 11893 * why we set last_host_tsc to the local tsc observed here. 11894 * 11895 * N.B. - this code below runs only on platforms with reliable TSC, 11896 * as that is the only way backwards_tsc is set above. Also note 11897 * that this runs for ALL vcpus, which is not a bug; all VCPUs should 11898 * have the same delta_cyc adjustment applied if backwards_tsc 11899 * is detected. Note further, this adjustment is only done once, 11900 * as we reset last_host_tsc on all VCPUs to stop this from being 11901 * called multiple times (one for each physical CPU bringup).
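 *
 * As a worked example with hypothetical numbers: if the largest
 * last_host_tsc recorded across all vCPUs is 1,000,000 and the
 * local_tsc read above is 400,000, then delta_cyc below is 600,000,
 * and that amount is accumulated into each vCPU's
 * tsc_offset_adjustment so guest TSC reads stay monotonic across the
 * suspend.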
11902 * 11903 * Platforms with unreliable TSCs don't have to deal with this; they 11904 * will be compensated by the logic in vcpu_load, which sets the TSC to 11905 * catchup mode. This will catch up all VCPUs to real time, but cannot 11906 * guarantee that they stay in perfect synchronization. 11907 */ 11908 if (backwards_tsc) { 11909 u64 delta_cyc = max_tsc - local_tsc; 11910 list_for_each_entry(kvm, &vm_list, vm_list) { 11911 kvm->arch.backwards_tsc_observed = true; 11912 kvm_for_each_vcpu(i, vcpu, kvm) { 11913 vcpu->arch.tsc_offset_adjustment += delta_cyc; 11914 vcpu->arch.last_host_tsc = local_tsc; 11915 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 11916 } 11917 11918 /* 11919 * We have to disable TSC offset matching; if you were 11920 * booting a VM while the host was entering S4 suspend, 11921 * you may have some problems. Solving this issue is 11922 * left as an exercise to the reader. 11923 */ 11924 kvm->arch.last_tsc_nsec = 0; 11925 kvm->arch.last_tsc_write = 0; 11926 } 11927 11928 } 11929 return 0; 11930 } 11931 11932 void kvm_arch_hardware_disable(void) 11933 { 11934 static_call(kvm_x86_hardware_disable)(); 11935 drop_user_return_notifiers(); 11936 } 11937 11938 static inline void kvm_ops_update(struct kvm_x86_init_ops *ops) 11939 { 11940 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); 11941 11942 #define __KVM_X86_OP(func) \ 11943 static_call_update(kvm_x86_##func, kvm_x86_ops.func); 11944 #define KVM_X86_OP(func) \ 11945 WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func) 11946 #define KVM_X86_OP_OPTIONAL __KVM_X86_OP 11947 #define KVM_X86_OP_OPTIONAL_RET0(func) \ 11948 static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \ 11949 (void *)__static_call_return0); 11950 #include <asm/kvm-x86-ops.h> 11951 #undef __KVM_X86_OP 11952 11953 kvm_pmu_ops_update(ops->pmu_ops); 11954 } 11955 11956 int kvm_arch_hardware_setup(void *opaque) 11957 { 11958 struct kvm_x86_init_ops *ops = opaque; 11959 int r; 11960 11961 rdmsrl_safe(MSR_EFER, &host_efer); 11962 11963 if (boot_cpu_has(X86_FEATURE_XSAVES)) 11964 rdmsrl(MSR_IA32_XSS, host_xss); 11965 11966 kvm_init_pmu_capability(); 11967 11968 r = ops->hardware_setup(); 11969 if (r != 0) 11970 return r; 11971 11972 kvm_ops_update(ops); 11973 11974 kvm_register_perf_callbacks(ops->handle_intel_pt_intr); 11975 11976 if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) 11977 kvm_caps.supported_xss = 0; 11978 11979 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f) 11980 cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_); 11981 #undef __kvm_cpu_cap_has 11982 11983 if (kvm_caps.has_tsc_control) { 11984 /* 11985 * Make sure the user can only configure tsc_khz values that 11986 * fit into a signed integer. 11987 * A min value is not calculated because it will always 11988 * be 1 on all machines.
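 * E.g. (hypothetical values): with tsc_khz = 2,500,000 (a 2.5 GHz
 * host) and a maximum scaling ratio of 16, the cap works out to
 * min(0x7fffffff, 40,000,000) = 40,000,000 kHz.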
11989 */ 11990 u64 max = min(0x7fffffffULL, 11991 __scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz)); 11992 kvm_caps.max_guest_tsc_khz = max; 11993 } 11994 kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits; 11995 kvm_init_msr_list(); 11996 return 0; 11997 } 11998 11999 void kvm_arch_hardware_unsetup(void) 12000 { 12001 kvm_unregister_perf_callbacks(); 12002 12003 static_call(kvm_x86_hardware_unsetup)(); 12004 } 12005 12006 int kvm_arch_check_processor_compat(void *opaque) 12007 { 12008 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); 12009 struct kvm_x86_init_ops *ops = opaque; 12010 12011 WARN_ON(!irqs_disabled()); 12012 12013 if (__cr4_reserved_bits(cpu_has, c) != 12014 __cr4_reserved_bits(cpu_has, &boot_cpu_data)) 12015 return -EIO; 12016 12017 return ops->check_processor_compatibility(); 12018 } 12019 12020 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) 12021 { 12022 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; 12023 } 12024 EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp); 12025 12026 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) 12027 { 12028 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; 12029 } 12030 12031 __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); 12032 EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); 12033 12034 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) 12035 { 12036 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 12037 12038 vcpu->arch.l1tf_flush_l1d = true; 12039 if (pmu->version && unlikely(pmu->event_count)) { 12040 pmu->need_cleanup = true; 12041 kvm_make_request(KVM_REQ_PMU, vcpu); 12042 } 12043 static_call(kvm_x86_sched_in)(vcpu, cpu); 12044 } 12045 12046 void kvm_arch_free_vm(struct kvm *kvm) 12047 { 12048 kfree(to_kvm_hv(kvm)->hv_pa_pg); 12049 __kvm_arch_free_vm(kvm); 12050 } 12051 12052 12053 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 12054 { 12055 int ret; 12056 unsigned long flags; 12057 12058 if (type) 12059 return -EINVAL; 12060 12061 ret = kvm_page_track_init(kvm); 12062 if (ret) 12063 goto out; 12064 12065 ret = kvm_mmu_init_vm(kvm); 12066 if (ret) 12067 goto out_page_track; 12068 12069 ret = static_call(kvm_x86_vm_init)(kvm); 12070 if (ret) 12071 goto out_uninit_mmu; 12072 12073 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); 12074 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 12075 atomic_set(&kvm->arch.noncoherent_dma_count, 0); 12076 12077 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 12078 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); 12079 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ 12080 set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, 12081 &kvm->arch.irq_sources_bitmap); 12082 12083 raw_spin_lock_init(&kvm->arch.tsc_write_lock); 12084 mutex_init(&kvm->arch.apic_map_lock); 12085 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock); 12086 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); 12087 12088 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 12089 pvclock_update_vm_gtod_copy(kvm); 12090 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 12091 12092 kvm->arch.default_tsc_khz = max_tsc_khz ? 
: tsc_khz; 12093 kvm->arch.guest_can_read_msr_platform_info = true; 12094 kvm->arch.enable_pmu = enable_pmu; 12095 12096 #if IS_ENABLED(CONFIG_HYPERV) 12097 spin_lock_init(&kvm->arch.hv_root_tdp_lock); 12098 kvm->arch.hv_root_tdp = INVALID_PAGE; 12099 #endif 12100 12101 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); 12102 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); 12103 12104 kvm_apicv_init(kvm); 12105 kvm_hv_init_vm(kvm); 12106 kvm_xen_init_vm(kvm); 12107 12108 return 0; 12109 12110 out_uninit_mmu: 12111 kvm_mmu_uninit_vm(kvm); 12112 out_page_track: 12113 kvm_page_track_cleanup(kvm); 12114 out: 12115 return ret; 12116 } 12117 12118 int kvm_arch_post_init_vm(struct kvm *kvm) 12119 { 12120 return kvm_mmu_post_init_vm(kvm); 12121 } 12122 12123 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) 12124 { 12125 vcpu_load(vcpu); 12126 kvm_mmu_unload(vcpu); 12127 vcpu_put(vcpu); 12128 } 12129 12130 static void kvm_unload_vcpu_mmus(struct kvm *kvm) 12131 { 12132 unsigned long i; 12133 struct kvm_vcpu *vcpu; 12134 12135 kvm_for_each_vcpu(i, vcpu, kvm) { 12136 kvm_clear_async_pf_completion_queue(vcpu); 12137 kvm_unload_vcpu_mmu(vcpu); 12138 } 12139 } 12140 12141 void kvm_arch_sync_events(struct kvm *kvm) 12142 { 12143 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); 12144 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); 12145 kvm_free_pit(kvm); 12146 } 12147 12148 /** 12149 * __x86_set_memory_region: Set up a KVM internal memory slot 12150 * 12151 * @kvm: the kvm pointer to the VM. 12152 * @id: the slot ID to set up. 12153 * @gpa: the GPA to install the slot (unused when @size == 0). 12154 * @size: the size of the slot. Set to zero to uninstall a slot. 12155 * 12156 * This function helps to set up a KVM internal memory slot. Specify 12157 * @size > 0 to install a new slot, while @size == 0 to uninstall a 12158 * slot. The return code can be one of the following: 12159 * 12160 * HVA: on success (uninstall will return a bogus HVA) 12161 * -errno: on error 12162 * 12163 * The caller should always use IS_ERR() to check the return value 12164 * before use. Note, the KVM internal memory slots are guaranteed to 12165 * remain valid and unchanged until the VM is destroyed, i.e., the 12166 * GPA->HVA translation will not change. However, the HVA is a user 12167 * address, i.e. its accessibility is not guaranteed, and must be 12168 * accessed via __copy_{to,from}_user(). 12169 */ 12170 void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, 12171 u32 size) 12172 { 12173 int i, r; 12174 unsigned long hva, old_npages; 12175 struct kvm_memslots *slots = kvm_memslots(kvm); 12176 struct kvm_memory_slot *slot; 12177 12178 /* Called with kvm->slots_lock held. */ 12179 if (WARN_ON(id >= KVM_MEM_SLOTS_NUM)) 12180 return ERR_PTR_USR(-EINVAL); 12181 12182 slot = id_to_memslot(slots, id); 12183 if (size) { 12184 if (slot && slot->npages) 12185 return ERR_PTR_USR(-EEXIST); 12186 12187 /* 12188 * MAP_SHARED to prevent internal slot pages from being moved 12189 * by fork()/COW.
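 * (With MAP_PRIVATE, a write after fork() would COW the page and the
 * HVA could end up backed by a different physical page, breaking the
 * stable GPA->HVA mapping documented above.)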
12190 */ 12191 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, 12192 MAP_SHARED | MAP_ANONYMOUS, 0); 12193 if (IS_ERR((void *)hva)) 12194 return (void __user *)hva; 12195 } else { 12196 if (!slot || !slot->npages) 12197 return NULL; 12198 12199 old_npages = slot->npages; 12200 hva = slot->userspace_addr; 12201 } 12202 12203 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 12204 struct kvm_userspace_memory_region m; 12205 12206 m.slot = id | (i << 16); 12207 m.flags = 0; 12208 m.guest_phys_addr = gpa; 12209 m.userspace_addr = hva; 12210 m.memory_size = size; 12211 r = __kvm_set_memory_region(kvm, &m); 12212 if (r < 0) 12213 return ERR_PTR_USR(r); 12214 } 12215 12216 if (!size) 12217 vm_munmap(hva, old_npages * PAGE_SIZE); 12218 12219 return (void __user *)hva; 12220 } 12221 EXPORT_SYMBOL_GPL(__x86_set_memory_region); 12222 12223 void kvm_arch_pre_destroy_vm(struct kvm *kvm) 12224 { 12225 kvm_mmu_pre_destroy_vm(kvm); 12226 } 12227 12228 void kvm_arch_destroy_vm(struct kvm *kvm) 12229 { 12230 if (current->mm == kvm->mm) { 12231 /* 12232 * Free memory regions allocated on behalf of userspace, 12233 * unless the memory map has changed due to process exit 12234 * or fd copying. 12235 */ 12236 mutex_lock(&kvm->slots_lock); 12237 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 12238 0, 0); 12239 __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 12240 0, 0); 12241 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); 12242 mutex_unlock(&kvm->slots_lock); 12243 } 12244 kvm_unload_vcpu_mmus(kvm); 12245 static_call_cond(kvm_x86_vm_destroy)(kvm); 12246 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); 12247 kvm_pic_destroy(kvm); 12248 kvm_ioapic_destroy(kvm); 12249 kvm_destroy_vcpus(kvm); 12250 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 12251 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); 12252 kvm_mmu_uninit_vm(kvm); 12253 kvm_page_track_cleanup(kvm); 12254 kvm_xen_destroy_vm(kvm); 12255 kvm_hv_destroy_vm(kvm); 12256 } 12257 12258 static void memslot_rmap_free(struct kvm_memory_slot *slot) 12259 { 12260 int i; 12261 12262 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 12263 kvfree(slot->arch.rmap[i]); 12264 slot->arch.rmap[i] = NULL; 12265 } 12266 } 12267 12268 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 12269 { 12270 int i; 12271 12272 memslot_rmap_free(slot); 12273 12274 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12275 kvfree(slot->arch.lpage_info[i - 1]); 12276 slot->arch.lpage_info[i - 1] = NULL; 12277 } 12278 12279 kvm_page_track_free_memslot(slot); 12280 } 12281 12282 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages) 12283 { 12284 const int sz = sizeof(*slot->arch.rmap[0]); 12285 int i; 12286 12287 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 12288 int level = i + 1; 12289 int lpages = __kvm_mmu_slot_lpages(slot, npages, level); 12290 12291 if (slot->arch.rmap[i]) 12292 continue; 12293 12294 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT); 12295 if (!slot->arch.rmap[i]) { 12296 memslot_rmap_free(slot); 12297 return -ENOMEM; 12298 } 12299 } 12300 12301 return 0; 12302 } 12303 12304 static int kvm_alloc_memslot_metadata(struct kvm *kvm, 12305 struct kvm_memory_slot *slot) 12306 { 12307 unsigned long npages = slot->npages; 12308 int i, r; 12309 12310 /* 12311 * Clear out the previous array pointers for the KVM_MR_MOVE case. 
The 12312 * old arrays will be freed by __kvm_set_memory_region() if installing 12313 * the new memslot is successful. 12314 */ 12315 memset(&slot->arch, 0, sizeof(slot->arch)); 12316 12317 if (kvm_memslots_have_rmaps(kvm)) { 12318 r = memslot_rmap_alloc(slot, npages); 12319 if (r) 12320 return r; 12321 } 12322 12323 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12324 struct kvm_lpage_info *linfo; 12325 unsigned long ugfn; 12326 int lpages; 12327 int level = i + 1; 12328 12329 lpages = __kvm_mmu_slot_lpages(slot, npages, level); 12330 12331 linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT); 12332 if (!linfo) 12333 goto out_free; 12334 12335 slot->arch.lpage_info[i - 1] = linfo; 12336 12337 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 12338 linfo[0].disallow_lpage = 1; 12339 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 12340 linfo[lpages - 1].disallow_lpage = 1; 12341 ugfn = slot->userspace_addr >> PAGE_SHIFT; 12342 /* 12343 * If the gfn and userspace address are not aligned wrt each 12344 * other, disable large page support for this slot. 12345 */ 12346 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { 12347 unsigned long j; 12348 12349 for (j = 0; j < lpages; ++j) 12350 linfo[j].disallow_lpage = 1; 12351 } 12352 } 12353 12354 if (kvm_page_track_create_memslot(kvm, slot, npages)) 12355 goto out_free; 12356 12357 return 0; 12358 12359 out_free: 12360 memslot_rmap_free(slot); 12361 12362 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12363 kvfree(slot->arch.lpage_info[i - 1]); 12364 slot->arch.lpage_info[i - 1] = NULL; 12365 } 12366 return -ENOMEM; 12367 } 12368 12369 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) 12370 { 12371 struct kvm_vcpu *vcpu; 12372 unsigned long i; 12373 12374 /* 12375 * memslots->generation has been incremented. 12376 * mmio generation may have reached its maximum value. 12377 */ 12378 kvm_mmu_invalidate_mmio_sptes(kvm, gen); 12379 12380 /* Force re-initialization of steal_time cache */ 12381 kvm_for_each_vcpu(i, vcpu, kvm) 12382 kvm_vcpu_kick(vcpu); 12383 } 12384 12385 int kvm_arch_prepare_memory_region(struct kvm *kvm, 12386 const struct kvm_memory_slot *old, 12387 struct kvm_memory_slot *new, 12388 enum kvm_mr_change change) 12389 { 12390 if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) { 12391 if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn()) 12392 return -EINVAL; 12393 12394 return kvm_alloc_memslot_metadata(kvm, new); 12395 } 12396 12397 if (change == KVM_MR_FLAGS_ONLY) 12398 memcpy(&new->arch, &old->arch, sizeof(old->arch)); 12399 else if (WARN_ON_ONCE(change != KVM_MR_DELETE)) 12400 return -EIO; 12401 12402 return 0; 12403 } 12404 12405 12406 static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable) 12407 { 12408 struct kvm_arch *ka = &kvm->arch; 12409 12410 if (!kvm_x86_ops.cpu_dirty_log_size) 12411 return; 12412 12413 if ((enable && ++ka->cpu_dirty_logging_count == 1) || 12414 (!enable && --ka->cpu_dirty_logging_count == 0)) 12415 kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING); 12416 12417 WARN_ON_ONCE(ka->cpu_dirty_logging_count < 0); 12418 } 12419 12420 static void kvm_mmu_slot_apply_flags(struct kvm *kvm, 12421 struct kvm_memory_slot *old, 12422 const struct kvm_memory_slot *new, 12423 enum kvm_mr_change change) 12424 { 12425 u32 old_flags = old ? old->flags : 0; 12426 u32 new_flags = new ? 
new->flags : 0; 12427 bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES; 12428 12429 /* 12430 * Update CPU dirty logging if dirty logging is being toggled. This 12431 * applies to all operations. 12432 */ 12433 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) 12434 kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages); 12435 12436 /* 12437 * Nothing more to do for RO slots (which can't be dirtied and can't be 12438 * made writable) or CREATE/MOVE/DELETE of a slot. 12439 * 12440 * For a memslot with dirty logging disabled: 12441 * CREATE: No dirty mappings will already exist. 12442 * MOVE/DELETE: The old mappings will already have been cleaned up by 12443 * kvm_arch_flush_shadow_memslot() 12444 * 12445 * For a memslot with dirty logging enabled: 12446 * CREATE: No shadow pages exist, thus nothing to write-protect 12447 * and no dirty bits to clear. 12448 * MOVE/DELETE: The old mappings will already have been cleaned up by 12449 * kvm_arch_flush_shadow_memslot(). 12450 */ 12451 if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY)) 12452 return; 12453 12454 /* 12455 * READONLY and non-flags changes were filtered out above, and the only 12456 * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty 12457 * logging isn't being toggled on or off. 12458 */ 12459 if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES))) 12460 return; 12461 12462 if (!log_dirty_pages) { 12463 /* 12464 * Dirty logging tracks sptes in 4k granularity, meaning that 12465 * large sptes have to be split. If live migration succeeds, 12466 * the guest in the source machine will be destroyed and large 12467 * sptes will be created in the destination. However, if the 12468 * guest continues to run in the source machine (for example if 12469 * live migration fails), small sptes will remain around and 12470 * cause bad performance. 12471 * 12472 * Scan sptes if dirty logging has been stopped, dropping those 12473 * which can be collapsed into a single large-page spte. Later 12474 * page faults will create the large-page sptes. 12475 */ 12476 kvm_mmu_zap_collapsible_sptes(kvm, new); 12477 } else { 12478 /* 12479 * Initially-all-set does not require write protecting any page, 12480 * because they're all assumed to be dirty. 12481 */ 12482 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 12483 return; 12484 12485 if (READ_ONCE(eager_page_split)) 12486 kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K); 12487 12488 if (kvm_x86_ops.cpu_dirty_log_size) { 12489 kvm_mmu_slot_leaf_clear_dirty(kvm, new); 12490 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M); 12491 } else { 12492 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K); 12493 } 12494 12495 /* 12496 * Unconditionally flush the TLBs after enabling dirty logging. 12497 * A flush is almost always going to be necessary (see below), 12498 * and unconditionally flushing allows the helpers to omit 12499 * the subtly complex checks when removing write access. 12500 * 12501 * Do the flush outside of mmu_lock to reduce the amount of 12502 * time mmu_lock is held. Flushing after dropping mmu_lock is 12503 * safe as KVM only needs to guarantee the slot is fully 12504 * write-protected before returning to userspace, i.e. before 12505 * userspace can consume the dirty status. 12506 * 12507 * Flushing outside of mmu_lock requires KVM to be careful when 12508 * making decisions based on writable status of an SPTE, e.g. a 12509 * !writable SPTE doesn't guarantee a CPU can't perform writes. 
12510 * 12511 * Specifically, KVM also write-protects guest page tables to 12512 * monitor changes when using shadow paging, and must guarantee 12513 * no CPUs can write to those pages before mmu_lock is dropped. 12514 * Because CPUs may have stale TLB entries at this point, a 12515 * !writable SPTE doesn't guarantee CPUs can't perform writes. 12516 * 12517 * KVM also allows making SPTEs writable outside of mmu_lock, 12518 * e.g. to allow dirty logging without taking mmu_lock. 12519 * 12520 * To handle these scenarios, KVM uses a separate software-only 12521 * bit (MMU-writable) to track if a SPTE is !writable due to 12522 * a guest page table being write-protected (KVM clears the 12523 * MMU-writable flag when write-protecting for shadow paging). 12524 * 12525 * The use of MMU-writable is also the primary motivation for 12526 * the unconditional flush. Because KVM must guarantee that a 12527 * CPU doesn't contain stale, writable TLB entries for a 12528 * !MMU-writable SPTE, KVM must flush if it encounters any 12529 * MMU-writable SPTE regardless of whether the actual hardware 12530 * writable bit was set. I.e. KVM is almost guaranteed to need 12531 * to flush, while unconditionally flushing allows the "remove 12532 * write access" helpers to ignore MMU-writable entirely. 12533 * 12534 * See is_writable_pte() for more details (the case involving 12535 * access-tracked SPTEs is particularly relevant). 12536 */ 12537 kvm_arch_flush_remote_tlbs_memslot(kvm, new); 12538 } 12539 } 12540 12541 void kvm_arch_commit_memory_region(struct kvm *kvm, 12542 struct kvm_memory_slot *old, 12543 const struct kvm_memory_slot *new, 12544 enum kvm_mr_change change) 12545 { 12546 if (!kvm->arch.n_requested_mmu_pages && 12547 (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) { 12548 unsigned long nr_mmu_pages; 12549 12550 nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO; 12551 nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); 12552 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); 12553 } 12554 12555 kvm_mmu_slot_apply_flags(kvm, old, new, change); 12556 12557 /* Free the arrays associated with the old memslot.
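 * For KVM_MR_MOVE, kvm_alloc_memslot_metadata() already gave the new
 * slot fresh rmap/lpage_info arrays, so only the old slot's copies
 * are being released here.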
*/ 12558 if (change == KVM_MR_MOVE) 12559 kvm_arch_free_memslot(kvm, old); 12560 } 12561 12562 void kvm_arch_flush_shadow_all(struct kvm *kvm) 12563 { 12564 kvm_mmu_zap_all(kvm); 12565 } 12566 12567 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 12568 struct kvm_memory_slot *slot) 12569 { 12570 kvm_page_track_flush_slot(kvm, slot); 12571 } 12572 12573 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) 12574 { 12575 return (is_guest_mode(vcpu) && 12576 static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); 12577 } 12578 12579 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) 12580 { 12581 if (!list_empty_careful(&vcpu->async_pf.done)) 12582 return true; 12583 12584 if (kvm_apic_has_pending_init_or_sipi(vcpu) && 12585 kvm_apic_init_sipi_allowed(vcpu)) 12586 return true; 12587 12588 if (vcpu->arch.pv.pv_unhalted) 12589 return true; 12590 12591 if (kvm_is_exception_pending(vcpu)) 12592 return true; 12593 12594 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 12595 (vcpu->arch.nmi_pending && 12596 static_call(kvm_x86_nmi_allowed)(vcpu, false))) 12597 return true; 12598 12599 #ifdef CONFIG_KVM_SMM 12600 if (kvm_test_request(KVM_REQ_SMI, vcpu) || 12601 (vcpu->arch.smi_pending && 12602 static_call(kvm_x86_smi_allowed)(vcpu, false))) 12603 return true; 12604 #endif 12605 12606 if (kvm_arch_interrupt_allowed(vcpu) && 12607 (kvm_cpu_has_interrupt(vcpu) || 12608 kvm_guest_apic_has_interrupt(vcpu))) 12609 return true; 12610 12611 if (kvm_hv_has_stimer_pending(vcpu)) 12612 return true; 12613 12614 if (is_guest_mode(vcpu) && 12615 kvm_x86_ops.nested_ops->has_events && 12616 kvm_x86_ops.nested_ops->has_events(vcpu)) 12617 return true; 12618 12619 if (kvm_xen_has_pending_events(vcpu)) 12620 return true; 12621 12622 return false; 12623 } 12624 12625 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 12626 { 12627 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); 12628 } 12629 12630 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 12631 { 12632 if (kvm_vcpu_apicv_active(vcpu) && 12633 static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) 12634 return true; 12635 12636 return false; 12637 } 12638 12639 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 12640 { 12641 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) 12642 return true; 12643 12644 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 12645 #ifdef CONFIG_KVM_SMM 12646 kvm_test_request(KVM_REQ_SMI, vcpu) || 12647 #endif 12648 kvm_test_request(KVM_REQ_EVENT, vcpu)) 12649 return true; 12650 12651 return kvm_arch_dy_has_pending_interrupt(vcpu); 12652 } 12653 12654 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) 12655 { 12656 if (vcpu->arch.guest_state_protected) 12657 return true; 12658 12659 return vcpu->arch.preempted_in_kernel; 12660 } 12661 12662 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) 12663 { 12664 return kvm_rip_read(vcpu); 12665 } 12666 12667 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 12668 { 12669 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; 12670 } 12671 12672 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) 12673 { 12674 return static_call(kvm_x86_interrupt_allowed)(vcpu, false); 12675 } 12676 12677 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) 12678 { 12679 /* Can't read the RIP when guest state is protected, just return 0 */ 12680 if (vcpu->arch.guest_state_protected) 12681 return 0; 12682 12683 if (is_64_bit_mode(vcpu)) 12684 return kvm_rip_read(vcpu); 12685 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + 12686 kvm_rip_read(vcpu)); 12687 
} 12688 EXPORT_SYMBOL_GPL(kvm_get_linear_rip); 12689 12690 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) 12691 { 12692 return kvm_get_linear_rip(vcpu) == linear_rip; 12693 } 12694 EXPORT_SYMBOL_GPL(kvm_is_linear_rip); 12695 12696 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) 12697 { 12698 unsigned long rflags; 12699 12700 rflags = static_call(kvm_x86_get_rflags)(vcpu); 12701 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 12702 rflags &= ~X86_EFLAGS_TF; 12703 return rflags; 12704 } 12705 EXPORT_SYMBOL_GPL(kvm_get_rflags); 12706 12707 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 12708 { 12709 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && 12710 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) 12711 rflags |= X86_EFLAGS_TF; 12712 static_call(kvm_x86_set_rflags)(vcpu, rflags); 12713 } 12714 12715 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 12716 { 12717 __kvm_set_rflags(vcpu, rflags); 12718 kvm_make_request(KVM_REQ_EVENT, vcpu); 12719 } 12720 EXPORT_SYMBOL_GPL(kvm_set_rflags); 12721 12722 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) 12723 { 12724 BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU)); 12725 12726 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); 12727 } 12728 12729 static inline u32 kvm_async_pf_next_probe(u32 key) 12730 { 12731 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); 12732 } 12733 12734 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12735 { 12736 u32 key = kvm_async_pf_hash_fn(gfn); 12737 12738 while (vcpu->arch.apf.gfns[key] != ~0) 12739 key = kvm_async_pf_next_probe(key); 12740 12741 vcpu->arch.apf.gfns[key] = gfn; 12742 } 12743 12744 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) 12745 { 12746 int i; 12747 u32 key = kvm_async_pf_hash_fn(gfn); 12748 12749 for (i = 0; i < ASYNC_PF_PER_VCPU && 12750 (vcpu->arch.apf.gfns[key] != gfn && 12751 vcpu->arch.apf.gfns[key] != ~0); i++) 12752 key = kvm_async_pf_next_probe(key); 12753 12754 return key; 12755 } 12756 12757 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12758 { 12759 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; 12760 } 12761 12762 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12763 { 12764 u32 i, j, k; 12765 12766 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); 12767 12768 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) 12769 return; 12770 12771 while (true) { 12772 vcpu->arch.apf.gfns[i] = ~0; 12773 do { 12774 j = kvm_async_pf_next_probe(j); 12775 if (vcpu->arch.apf.gfns[j] == ~0) 12776 return; 12777 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); 12778 /* 12779 * k lies cyclically in ]i,j] 12780 * | i.k.j | 12781 * |....j i.k.| or |.k..j i...| 12782 */ 12783 } while ((i <= j) ? 
(i < k && k <= j) : (i < k || k <= j)); 12784 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; 12785 i = j; 12786 } 12787 } 12788 12789 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu) 12790 { 12791 u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT; 12792 12793 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, 12794 sizeof(reason)); 12795 } 12796 12797 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token) 12798 { 12799 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token); 12800 12801 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, 12802 &token, offset, sizeof(token)); 12803 } 12804 12805 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu) 12806 { 12807 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token); 12808 u32 val; 12809 12810 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, 12811 &val, offset, sizeof(val))) 12812 return false; 12813 12814 return !val; 12815 } 12816 12817 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) 12818 { 12819 12820 if (!kvm_pv_async_pf_enabled(vcpu)) 12821 return false; 12822 12823 if (vcpu->arch.apf.send_user_only && 12824 static_call(kvm_x86_get_cpl)(vcpu) == 0) 12825 return false; 12826 12827 if (is_guest_mode(vcpu)) { 12828 /* 12829 * L1 needs to opt into the special #PF vmexits that are 12830 * used to deliver async page faults. 12831 */ 12832 return vcpu->arch.apf.delivery_as_pf_vmexit; 12833 } else { 12834 /* 12835 * Play it safe in case the guest temporarily disables paging. 12836 * The real mode IDT in particular is unlikely to have a #PF 12837 * exception setup. 12838 */ 12839 return is_paging(vcpu); 12840 } 12841 } 12842 12843 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) 12844 { 12845 if (unlikely(!lapic_in_kernel(vcpu) || 12846 kvm_event_needs_reinjection(vcpu) || 12847 kvm_is_exception_pending(vcpu))) 12848 return false; 12849 12850 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) 12851 return false; 12852 12853 /* 12854 * If interrupts are off we cannot even use an artificial 12855 * halt state. 12856 */ 12857 return kvm_arch_interrupt_allowed(vcpu); 12858 } 12859 12860 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 12861 struct kvm_async_pf *work) 12862 { 12863 struct x86_exception fault; 12864 12865 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); 12866 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); 12867 12868 if (kvm_can_deliver_async_pf(vcpu) && 12869 !apf_put_user_notpresent(vcpu)) { 12870 fault.vector = PF_VECTOR; 12871 fault.error_code_valid = true; 12872 fault.error_code = 0; 12873 fault.nested_page_fault = false; 12874 fault.address = work->arch.token; 12875 fault.async_page_fault = true; 12876 kvm_inject_page_fault(vcpu, &fault); 12877 return true; 12878 } else { 12879 /* 12880 * It is not possible to deliver a paravirtualized asynchronous 12881 * page fault, but putting the guest in an artificial halt state 12882 * can be beneficial nevertheless: if an interrupt arrives, we 12883 * can deliver it timely and perhaps the guest will schedule 12884 * another process. When the instruction that triggered a page 12885 * fault is retried, hopefully the page will be ready in the host. 
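 *
 * The KVM_REQ_APF_HALT request made below is what puts the vCPU
 * into that artificial halt state; it is processed on the next
 * pass through the run loop.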
12886 */ 12887 kvm_make_request(KVM_REQ_APF_HALT, vcpu); 12888 return false; 12889 } 12890 } 12891 12892 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 12893 struct kvm_async_pf *work) 12894 { 12895 struct kvm_lapic_irq irq = { 12896 .delivery_mode = APIC_DM_FIXED, 12897 .vector = vcpu->arch.apf.vec 12898 }; 12899 12900 if (work->wakeup_all) 12901 work->arch.token = ~0; /* broadcast wakeup */ 12902 else 12903 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); 12904 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); 12905 12906 if ((work->wakeup_all || work->notpresent_injected) && 12907 kvm_pv_async_pf_enabled(vcpu) && 12908 !apf_put_user_ready(vcpu, work->arch.token)) { 12909 vcpu->arch.apf.pageready_pending = true; 12910 kvm_apic_set_irq(vcpu, &irq, NULL); 12911 } 12912 12913 vcpu->arch.apf.halted = false; 12914 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 12915 } 12916 12917 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) 12918 { 12919 kvm_make_request(KVM_REQ_APF_READY, vcpu); 12920 if (!vcpu->arch.apf.pageready_pending) 12921 kvm_vcpu_kick(vcpu); 12922 } 12923 12924 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) 12925 { 12926 if (!kvm_pv_async_pf_enabled(vcpu)) 12927 return true; 12928 else 12929 return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); 12930 } 12931 12932 void kvm_arch_start_assignment(struct kvm *kvm) 12933 { 12934 if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1) 12935 static_call_cond(kvm_x86_pi_start_assignment)(kvm); 12936 } 12937 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment); 12938 12939 void kvm_arch_end_assignment(struct kvm *kvm) 12940 { 12941 atomic_dec(&kvm->arch.assigned_device_count); 12942 } 12943 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment); 12944 12945 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm) 12946 { 12947 return arch_atomic_read(&kvm->arch.assigned_device_count); 12948 } 12949 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); 12950 12951 void kvm_arch_register_noncoherent_dma(struct kvm *kvm) 12952 { 12953 atomic_inc(&kvm->arch.noncoherent_dma_count); 12954 } 12955 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma); 12956 12957 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) 12958 { 12959 atomic_dec(&kvm->arch.noncoherent_dma_count); 12960 } 12961 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma); 12962 12963 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) 12964 { 12965 return atomic_read(&kvm->arch.noncoherent_dma_count); 12966 } 12967 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma); 12968 12969 bool kvm_arch_has_irq_bypass(void) 12970 { 12971 return true; 12972 } 12973 12974 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, 12975 struct irq_bypass_producer *prod) 12976 { 12977 struct kvm_kernel_irqfd *irqfd = 12978 container_of(cons, struct kvm_kernel_irqfd, consumer); 12979 int ret; 12980 12981 irqfd->producer = prod; 12982 kvm_arch_start_assignment(irqfd->kvm); 12983 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, 12984 prod->irq, irqfd->gsi, 1); 12985 12986 if (ret) 12987 kvm_arch_end_assignment(irqfd->kvm); 12988 12989 return ret; 12990 } 12991 12992 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, 12993 struct irq_bypass_producer *prod) 12994 { 12995 int ret; 12996 struct kvm_kernel_irqfd *irqfd = 12997 container_of(cons, struct kvm_kernel_irqfd, consumer); 12998 12999 WARN_ON(irqfd->producer != prod); 13000 irqfd->producer = NULL; 13001 13002 /* 13003 * When producer of consumer is 
unregistered, we change back to 13004 * remapped mode, so we can re-use the current implementation 13005 * when the irq is masked/disabled or the consumer side (KVM 13006 * in this case) doesn't want to receive the interrupts. 13007 */ 13008 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); 13009 if (ret) 13010 printk(KERN_INFO "irq bypass consumer (token %p) unregistration" 13011 " fails: %d\n", irqfd->consumer.token, ret); 13012 13013 kvm_arch_end_assignment(irqfd->kvm); 13014 } 13015 13016 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, 13017 uint32_t guest_irq, bool set) 13018 { 13019 return static_call(kvm_x86_pi_update_irte)(kvm, host_irq, guest_irq, set); 13020 } 13021 13022 bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, 13023 struct kvm_kernel_irq_routing_entry *new) 13024 { 13025 if (new->type != KVM_IRQ_ROUTING_MSI) 13026 return true; 13027 13028 return !!memcmp(&old->msi, &new->msi, sizeof(new->msi)); 13029 } 13030 13031 bool kvm_vector_hashing_enabled(void) 13032 { 13033 return vector_hashing; 13034 } 13035 13036 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) 13037 { 13038 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; 13039 } 13040 EXPORT_SYMBOL_GPL(kvm_arch_no_poll); 13041 13042 13043 int kvm_spec_ctrl_test_value(u64 value) 13044 { 13045 /* 13046 * test that setting IA32_SPEC_CTRL to a given value 13047 * is allowed by the host processor 13048 */ 13049 13050 u64 saved_value; 13051 unsigned long flags; 13052 int ret = 0; 13053 13054 local_irq_save(flags); 13055 13056 if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value)) 13057 ret = 1; 13058 else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value)) 13059 ret = 1; 13060 else 13061 wrmsrl(MSR_IA32_SPEC_CTRL, saved_value); 13062 13063 local_irq_restore(flags); 13064 13065 return ret; 13066 } 13067 EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value); 13068 13069 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) 13070 { 13071 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 13072 struct x86_exception fault; 13073 u64 access = error_code & 13074 (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK); 13075 13076 if (!(error_code & PFERR_PRESENT_MASK) || 13077 mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) { 13078 /* 13079 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page 13080 * tables probably do not match the TLB. Just proceed 13081 * with the error code that the processor gave. 13082 */ 13083 fault.vector = PF_VECTOR; 13084 fault.error_code_valid = true; 13085 fault.error_code = error_code; 13086 fault.nested_page_fault = false; 13087 fault.address = gva; 13088 fault.async_page_fault = false; 13089 } 13090 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); 13091 } 13092 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error); 13093 13094 /* 13095 * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns 13096 * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value 13097 * indicates whether exit to userspace is needed.
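 * A typical caller pattern is the one in kvm_handle_invpcid() below:
 * "return kvm_handle_memory_failure(vcpu, r, &e);" after a failed
 * kvm_read_guest_virt().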
13098 */ 13099 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, 13100 struct x86_exception *e) 13101 { 13102 if (r == X86EMUL_PROPAGATE_FAULT) { 13103 kvm_inject_emulated_page_fault(vcpu, e); 13104 return 1; 13105 } 13106 13107 /* 13108 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED 13109 * while handling a VMX instruction, KVM could've handled the request 13110 * correctly by exiting to userspace and performing I/O, but there 13111 * doesn't seem to be a real use-case behind such requests; just return 13112 * KVM_EXIT_INTERNAL_ERROR for now. 13113 */ 13114 kvm_prepare_emulation_failure_exit(vcpu); 13115 13116 return 0; 13117 } 13118 EXPORT_SYMBOL_GPL(kvm_handle_memory_failure); 13119 13120 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) 13121 { 13122 bool pcid_enabled; 13123 struct x86_exception e; 13124 struct { 13125 u64 pcid; 13126 u64 gla; 13127 } operand; 13128 int r; 13129 13130 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 13131 if (r != X86EMUL_CONTINUE) 13132 return kvm_handle_memory_failure(vcpu, r, &e); 13133 13134 if (operand.pcid >> 12 != 0) { 13135 kvm_inject_gp(vcpu, 0); 13136 return 1; 13137 } 13138 13139 pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); 13140 13141 switch (type) { 13142 case INVPCID_TYPE_INDIV_ADDR: 13143 if ((!pcid_enabled && (operand.pcid != 0)) || 13144 is_noncanonical_address(operand.gla, vcpu)) { 13145 kvm_inject_gp(vcpu, 0); 13146 return 1; 13147 } 13148 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); 13149 return kvm_skip_emulated_instruction(vcpu); 13150 13151 case INVPCID_TYPE_SINGLE_CTXT: 13152 if (!pcid_enabled && (operand.pcid != 0)) { 13153 kvm_inject_gp(vcpu, 0); 13154 return 1; 13155 } 13156 13157 kvm_invalidate_pcid(vcpu, operand.pcid); 13158 return kvm_skip_emulated_instruction(vcpu); 13159 13160 case INVPCID_TYPE_ALL_NON_GLOBAL: 13161 /* 13162 * Currently, KVM doesn't mark global entries in the shadow 13163 * page tables, so a non-global flush just degenerates to a 13164 * global flush. If needed, we could optimize this later by 13165 * keeping track of global entries in shadow page tables. 13166 */ 13167 13168 fallthrough; 13169 case INVPCID_TYPE_ALL_INCL_GLOBAL: 13170 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 13171 return kvm_skip_emulated_instruction(vcpu); 13172 13173 default: 13174 kvm_inject_gp(vcpu, 0); 13175 return 1; 13176 } 13177 } 13178 EXPORT_SYMBOL_GPL(kvm_handle_invpcid); 13179 13180 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu) 13181 { 13182 struct kvm_run *run = vcpu->run; 13183 struct kvm_mmio_fragment *frag; 13184 unsigned int len; 13185 13186 BUG_ON(!vcpu->mmio_needed); 13187 13188 /* Complete previous fragment */ 13189 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 13190 len = min(8u, frag->len); 13191 if (!vcpu->mmio_is_write) 13192 memcpy(frag->data, run->mmio.data, len); 13193 13194 if (frag->len <= 8) { 13195 /* Switch to the next fragment. */ 13196 frag++; 13197 vcpu->mmio_cur_fragment++; 13198 } else { 13199 /* Go forward to the next mmio piece.
*/ 13200 frag->data += len; 13201 frag->gpa += len; 13202 frag->len -= len; 13203 } 13204 13205 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 13206 vcpu->mmio_needed = 0; 13207 13208 // VMG change, at this point, we're always done 13209 // RIP has already been advanced 13210 return 1; 13211 } 13212 13213 // More MMIO is needed 13214 run->mmio.phys_addr = frag->gpa; 13215 run->mmio.len = min(8u, frag->len); 13216 run->mmio.is_write = vcpu->mmio_is_write; 13217 if (run->mmio.is_write) 13218 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 13219 run->exit_reason = KVM_EXIT_MMIO; 13220 13221 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13222 13223 return 0; 13224 } 13225 13226 int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, 13227 void *data) 13228 { 13229 int handled; 13230 struct kvm_mmio_fragment *frag; 13231 13232 if (!data) 13233 return -EINVAL; 13234 13235 handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data); 13236 if (handled == bytes) 13237 return 1; 13238 13239 bytes -= handled; 13240 gpa += handled; 13241 data += handled; 13242 13243 /* TODO: Check if we need to increment the number of frags */ 13244 frag = vcpu->mmio_fragments; 13245 vcpu->mmio_nr_fragments = 1; 13246 frag->len = bytes; 13247 frag->gpa = gpa; 13248 frag->data = data; 13249 13250 vcpu->mmio_needed = 1; 13251 vcpu->mmio_cur_fragment = 0; 13252 13253 vcpu->run->mmio.phys_addr = gpa; 13254 vcpu->run->mmio.len = min(8u, frag->len); 13255 vcpu->run->mmio.is_write = 1; 13256 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 13257 vcpu->run->exit_reason = KVM_EXIT_MMIO; 13258 13259 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13260 13261 return 0; 13262 } 13263 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write); 13264 13265 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, 13266 void *data) 13267 { 13268 int handled; 13269 struct kvm_mmio_fragment *frag; 13270 13271 if (!data) 13272 return -EINVAL; 13273 13274 handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data); 13275 if (handled == bytes) 13276 return 1; 13277 13278 bytes -= handled; 13279 gpa += handled; 13280 data += handled; 13281 13282 /* TODO: Check if we need to increment the number of frags */ 13283 frag = vcpu->mmio_fragments; 13284 vcpu->mmio_nr_fragments = 1; 13285 frag->len = bytes; 13286 frag->gpa = gpa; 13287 frag->data = data; 13288 13289 vcpu->mmio_needed = 1; 13290 vcpu->mmio_cur_fragment = 0; 13291 13292 vcpu->run->mmio.phys_addr = gpa; 13293 vcpu->run->mmio.len = min(8u, frag->len); 13294 vcpu->run->mmio.is_write = 0; 13295 vcpu->run->exit_reason = KVM_EXIT_MMIO; 13296 13297 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13298 13299 return 0; 13300 } 13301 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read); 13302 13303 static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size) 13304 { 13305 vcpu->arch.sev_pio_count -= count; 13306 vcpu->arch.sev_pio_data += count * size; 13307 } 13308 13309 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, 13310 unsigned int port); 13311 13312 static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu) 13313 { 13314 int size = vcpu->arch.pio.size; 13315 int port = vcpu->arch.pio.port; 13316 13317 vcpu->arch.pio.count = 0; 13318 if (vcpu->arch.sev_pio_count) 13319 return kvm_sev_es_outs(vcpu, size, port); 13320 return 1; 13321 } 13322 13323 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, 13324 unsigned int port)
13325 { 13326 for (;;) { 13327 unsigned int count = 13328 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); 13329 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count); 13330 13331 /* memcpy done already by emulator_pio_out. */ 13332 advance_sev_es_emulated_pio(vcpu, count, size); 13333 if (!ret) 13334 break; 13335 13336 /* Emulation done by the kernel. */ 13337 if (!vcpu->arch.sev_pio_count) 13338 return 1; 13339 } 13340 13341 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs; 13342 return 0; 13343 } 13344 13345 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, 13346 unsigned int port); 13347 13348 static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu) 13349 { 13350 unsigned count = vcpu->arch.pio.count; 13351 int size = vcpu->arch.pio.size; 13352 int port = vcpu->arch.pio.port; 13353 13354 complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data); 13355 advance_sev_es_emulated_pio(vcpu, count, size); 13356 if (vcpu->arch.sev_pio_count) 13357 return kvm_sev_es_ins(vcpu, size, port); 13358 return 1; 13359 } 13360 13361 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, 13362 unsigned int port) 13363 { 13364 for (;;) { 13365 unsigned int count = 13366 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); 13367 if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count)) 13368 break; 13369 13370 /* Emulation done by the kernel. */ 13371 advance_sev_es_emulated_pio(vcpu, count, size); 13372 if (!vcpu->arch.sev_pio_count) 13373 return 1; 13374 } 13375 13376 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins; 13377 return 0; 13378 } 13379 13380 int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size, 13381 unsigned int port, void *data, unsigned int count, 13382 int in) 13383 { 13384 vcpu->arch.sev_pio_data = data; 13385 vcpu->arch.sev_pio_count = count; 13386 return in ? 
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in)
{
	vcpu->arch.sev_pio_data = data;
	vcpu->arch.sev_pio_count = count;
	return in ? kvm_sev_es_ins(vcpu, size, port)
		  : kvm_sev_es_outs(vcpu, size, port);
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);

static int __init kvm_x86_init(void)
{
	kvm_mmu_x86_module_init();
	return 0;
}
module_init(kvm_x86_init);

static void __exit kvm_x86_exit(void)
{
	/*
	 * If module_init() is implemented, module_exit() must also be
	 * implemented to allow module unload.
	 */
}
module_exit(kvm_x86_exit);