// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"
#include "lapic.h"
#include "xen.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
#include <linux/suspend.h>

#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <asm/pkru.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <asm/sgx.h>
#include <clocksource/hyperv_timer.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32

struct kvm_caps kvm_caps __read_mostly = {
	.supported_mce_cap = MCG_CTL_P | MCG_SER_P,
};
EXPORT_SYMBOL_GPL(kvm_caps);

#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))

#define emul_to_vcpu(ctxt) \
	((struct kvm_vcpu *)(ctxt)->vcpu)

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;

#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)

#define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE

#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
				    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);

struct kvm_x86_ops kvm_x86_ops __read_mostly;
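/*
 * Generate an (initially NULL) static call for every kvm_x86_ops hook listed
 * in asm/kvm-x86-ops.h; the hooks are patched to the vendor (VMX or SVM)
 * callbacks when the vendor module registers its kvm_x86_ops.
 */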
#define KVM_X86_OP(func)					     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_##func,			     \
				*(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>
EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);

static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL_GPL(report_ignored_msrs);

unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

/*
 * lapic timer advance (tscdeadline mode only) in nanoseconds.  '-1' enables
 * adaptive tuning starting from default advancement of 1000ns.  '0' disables
 * advancement entirely.  Any other value is used as-is and disables adaptive
 * tuning, i.e. allows privileged userspace to set an exact advancement time.
 */
static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);

bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, S_IRUGO);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);

static bool __read_mostly force_emulation_prefix = false;
module_param(force_emulation_prefix, bool, S_IRUGO);

int __read_mostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);

/* Enable/disable PMU virtualization */
bool __read_mostly enable_pmu = true;
EXPORT_SYMBOL_GPL(enable_pmu);
module_param(enable_pmu, bool, 0444);

bool __read_mostly eager_page_split = true;
module_param(eager_page_split, bool, 0644);
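/*
 * Note: the parameters above are ordinary module parameters, so they can be
 * set at load time (e.g. "kvm.ignore_msrs=1" on the kernel command line) and,
 * for those registered with a writable mode (S_IWUSR / 0644), changed at
 * runtime via /sys/module/kvm/parameters/.
 */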
/*
 * Restoring the host value for MSRs that are only consumed when running in
 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
 * returns to userspace, i.e. the kernel can run with the guest's value.
 */
#define KVM_MAX_NR_USER_RETURN_MSRS 16

struct kvm_user_return_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_user_return_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_MAX_NR_USER_RETURN_MSRS];
};

u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;

#define KVM_SUPPORTED_XCR0	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)

u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);

bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

bool __read_mostly enable_apicv = true;
EXPORT_SYMBOL_GPL(enable_apicv);

u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
	STATS_DESC_COUNTER(VM, mmu_pte_write),
	STATS_DESC_COUNTER(VM, mmu_pde_zapped),
	STATS_DESC_COUNTER(VM, mmu_flooded),
	STATS_DESC_COUNTER(VM, mmu_recycled),
	STATS_DESC_COUNTER(VM, mmu_cache_miss),
	STATS_DESC_ICOUNTER(VM, mmu_unsync),
	STATS_DESC_ICOUNTER(VM, pages_4k),
	STATS_DESC_ICOUNTER(VM, pages_2m),
	STATS_DESC_ICOUNTER(VM, pages_1g),
	STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
	STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
	STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, pf_taken),
	STATS_DESC_COUNTER(VCPU, pf_fixed),
	STATS_DESC_COUNTER(VCPU, pf_emulate),
	STATS_DESC_COUNTER(VCPU, pf_spurious),
	STATS_DESC_COUNTER(VCPU, pf_fast),
	STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
	STATS_DESC_COUNTER(VCPU, pf_guest),
	STATS_DESC_COUNTER(VCPU, tlb_flush),
	STATS_DESC_COUNTER(VCPU, invlpg),
	STATS_DESC_COUNTER(VCPU, exits),
	STATS_DESC_COUNTER(VCPU, io_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, irq_window_exits),
	STATS_DESC_COUNTER(VCPU, nmi_window_exits),
	STATS_DESC_COUNTER(VCPU, l1d_flush),
	STATS_DESC_COUNTER(VCPU, halt_exits),
	STATS_DESC_COUNTER(VCPU, request_irq_exits),
	STATS_DESC_COUNTER(VCPU, irq_exits),
	STATS_DESC_COUNTER(VCPU, host_state_reload),
	STATS_DESC_COUNTER(VCPU, fpu_reload),
	STATS_DESC_COUNTER(VCPU, insn_emulation),
	STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
	STATS_DESC_COUNTER(VCPU, hypercalls),
	STATS_DESC_COUNTER(VCPU, irq_injections),
	STATS_DESC_COUNTER(VCPU, nmi_injections),
	STATS_DESC_COUNTER(VCPU, req_event),
	STATS_DESC_COUNTER(VCPU, nested_run),
	STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
	STATS_DESC_COUNTER(VCPU, directed_yield_successful),
	STATS_DESC_COUNTER(VCPU, preemption_reported),
	STATS_DESC_COUNTER(VCPU, preemption_other),
	STATS_DESC_ICOUNTER(VCPU, guest_mode),
	STATS_DESC_COUNTER(VCPU, notify_window_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

u64 __read_mostly host_xcr0;

static struct kmem_cache *x86_emulator_cache;

/*
 * When called, it means the previous get/set msr reached an invalid msr.
 * Return true if we want to ignore/silence this failed msr access.
 */
static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
{
	const char *op = write ? "wrmsr" : "rdmsr";

	if (ignore_msrs) {
		if (report_ignored_msrs)
			kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		/* Mask the error */
		return true;
	} else {
		kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		return false;
	}
}

static struct kmem_cache *kvm_alloc_emulator_cache(void)
{
	unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
	unsigned int size = sizeof(struct x86_emulate_ctxt);

	return kmem_cache_create_usercopy("x86_emulator", size,
					  __alignof__(struct x86_emulate_ctxt),
					  SLAB_ACCOUNT, useroffset,
					  size - useroffset, NULL);
}

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_user_return_msrs *msrs
		= container_of(urn, struct kvm_user_return_msrs, urn);
	struct kvm_user_return_msr_values *values;
	unsigned long flags;

	/*
	 * Disabling irqs at this point since the following code could be
	 * interrupted and executed through kvm_arch_hardware_disable()
	 */
	local_irq_save(flags);
	if (msrs->registered) {
		msrs->registered = false;
		user_return_notifier_unregister(urn);
	}
	local_irq_restore(flags);
	for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
		values = &msrs->values[slot];
		if (values->host != values->curr) {
			wrmsrl(kvm_uret_msrs_list[slot], values->host);
			values->curr = values->host;
		}
	}
}

static int kvm_probe_user_return_msr(u32 msr)
{
	u64 val;
	int ret;

	preempt_disable();
	ret = rdmsrl_safe(msr, &val);
	if (ret)
		goto out;
	ret = wrmsrl_safe(msr, val);
out:
	preempt_enable();
	return ret;
}

int kvm_add_user_return_msr(u32 msr)
{
	BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);

	if (kvm_probe_user_return_msr(msr))
		return -1;

	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
	return kvm_nr_uret_msrs++;
}
EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);

int kvm_find_user_return_msr(u32 msr)
{
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		if (kvm_uret_msrs_list[i] == msr)
			return i;
	}
	return -1;
}
EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
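/*
 * Snapshot the current hardware value of every registered user-return MSR so
 * that kvm_on_user_return() can restore it later; invoked when virtualization
 * is (re)enabled on a CPU.
 */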
static void kvm_user_return_msr_cpu_online(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	u64 value;
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		rdmsrl_safe(kvm_uret_msrs_list[i], &value);
		msrs->values[i].host = value;
		msrs->values[i].curr = value;
	}
}

int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	int err;

	value = (value & mask) | (msrs->values[slot].host & ~mask);
	if (value == msrs->values[slot].curr)
		return 0;
	err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
	if (err)
		return 1;

	msrs->values[slot].curr = value;
	if (!msrs->registered) {
		msrs->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&msrs->urn);
		msrs->registered = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);

static void drop_user_return_notifiers(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);

	if (msrs->registered)
		kvm_on_user_return(&msrs->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
	return kvm_apic_mode(kvm_get_apic_base(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_apic_mode);

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
	enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
	u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);

	if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
		return 1;
	if (!msr_info->host_initiated) {
		if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
			return 1;
		if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
			return 1;
	}

	kvm_lapic_set_base(vcpu, msr_info->data);
	kvm_recalculate_apic_map(vcpu->kvm);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

/*
 * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
 *
 * Hardware virtualization extension instructions may fault if a reboot turns
 * off virtualization while processes are running.  Usually after catching the
 * fault we just panic; during reboot instead the instruction is ignored.
 */
noinstr void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);
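/*
 * Exception classification used to apply the SDM's double-fault rules in
 * kvm_multiple_exception(): a second contributory exception raised while a
 * contributory exception is being delivered, or any non-benign exception
 * raised while a #PF is being delivered, escalates to #DF; benign exceptions
 * simply queue.
 */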
#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

#define EXCPT_FAULT		0
#define EXCPT_TRAP		1
#define EXCPT_ABORT		2
#define EXCPT_INTERRUPT		3

static int exception_type(int vector)
{
	unsigned int mask;

	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
		return EXCPT_INTERRUPT;

	mask = 1 << vector;

	/* #DB is trap, as instruction watchpoints are handled elsewhere */
	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
		return EXCPT_TRAP;

	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
		return EXCPT_ABORT;

	/* Reserved exceptions will result in fault */
	return EXCPT_FAULT;
}

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
{
	unsigned nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (!has_payload)
		return;

	switch (nr) {
	case DB_VECTOR:
		/*
		 * "Certain debug exceptions may clear bit 0-3.  The
		 * remaining contents of the DR6 register are never
		 * cleared by the processor".
		 */
		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
		/*
		 * In order to reflect the #DB exception payload in guest
		 * dr6, three components need to be considered: active low
		 * bit, FIXED_1 bits and active high bits (e.g. DR6_BD,
		 * DR6_BS and DR6_BT)
		 * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits.
		 * In the target guest dr6:
		 * FIXED_1 bits should always be set.
		 * Active low bits should be cleared if 1-setting in payload.
		 * Active high bits should be set if 1-setting in payload.
		 *
		 * Note, the payload is compatible with the pending debug
		 * exceptions/exit qualification under VMX, that active_low bits
		 * are active high in payload.
		 * So they need to be flipped for DR6.
		 */
		vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
		vcpu->arch.dr6 |= payload;
		vcpu->arch.dr6 ^= payload & DR6_ACTIVE_LOW;

		/*
		 * The #DB payload is defined as compatible with the 'pending
		 * debug exceptions' field under VMX, not DR6.  While bit 12 is
		 * defined in the 'pending debug exceptions' field (enabled
		 * breakpoint), it is reserved and must be zero in DR6.
		 */
		vcpu->arch.dr6 &= ~BIT(12);
		break;
	case PF_VECTOR:
		vcpu->arch.cr2 = payload;
		break;
	}

	vcpu->arch.exception.has_payload = false;
	vcpu->arch.exception.payload = 0;
}
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
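/*
 * Common worker for queueing an exception: if nothing is pending or injected
 * the new exception is simply recorded; otherwise it is merged with the
 * in-flight one, escalating to #DF per the class rules above, to a triple
 * fault (shutdown) when the previous exception was already #DF, or replacing
 * the old exception in the hope that re-executing the instruction will
 * regenerate it.
 */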
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool has_payload, unsigned long payload, bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
	queue:
		if (reinject) {
			/*
			 * On vmentry, vcpu->arch.exception.pending is only
			 * true if an event injection was blocked by
			 * nested_run_pending.  In that case, however,
			 * vcpu_enter_guest requests an immediate exit,
			 * and the guest shouldn't proceed far enough to
			 * need reinjection.
			 */
			WARN_ON_ONCE(vcpu->arch.exception.pending);
			vcpu->arch.exception.injected = true;
			if (WARN_ON_ONCE(has_payload)) {
				/*
				 * A reinjected event has already
				 * delivered its payload.
				 */
				has_payload = false;
				payload = 0;
			}
		} else {
			vcpu->arch.exception.pending = true;
			vcpu->arch.exception.injected = false;
		}
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.has_payload = has_payload;
		vcpu->arch.exception.payload = payload;
		if (!is_guest_mode(vcpu))
			kvm_deliver_exception_payload(vcpu);
		return;
	}

	/* to check exception */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/*
		 * Generate double fault per SDM Table 5-5.  Set
		 * exception.pending = true so that the double fault
		 * can trigger a nested vmexit.
		 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.injected = false;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
		vcpu->arch.exception.has_payload = false;
		vcpu->arch.exception.payload = 0;
	} else
		/* replace previous exception with a new one in the hope
		   that instruction re-execution will regenerate the lost
		   exception */
		goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
			   unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_p);

static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
				    u32 error_code, unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, true, error_code,
			       true, payload, false);
}

int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		return kvm_skip_emulated_instruction(vcpu);

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
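/*
 * Completion helper for instructions that were being emulated when KVM exited
 * to userspace: on error inject #GP, otherwise re-enter the emulator without
 * re-decoding (EMULTYPE_NO_DECODE) just to skip the instruction and finish
 * the user exit.
 */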
static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
				       EMULTYPE_COMPLETE_USER_EXIT);
}

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.exception.nested_apf =
		is_guest_mode(vcpu) && fault->async_page_fault;
	if (vcpu->arch.exception.nested_apf) {
		vcpu->arch.apf.nested_apf_token = fault->address;
		kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
	} else {
		kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
					fault->address);
	}
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

/* Returns true if the page fault was immediately morphed into a VM-Exit. */
bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
				    struct x86_exception *fault)
{
	struct kvm_mmu *fault_mmu;
	WARN_ON_ONCE(fault->vector != PF_VECTOR);

	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
					       vcpu->arch.walk_mmu;

	/*
	 * Invalidate the TLB entry for the faulting address, if it exists,
	 * else the access will fault indefinitely (and to emulate hardware).
	 */
	if ((fault->error_code & PFERR_PRESENT_MASK) &&
	    !(fault->error_code & PFERR_RSVD_MASK))
		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
				       fault_mmu->root.hpa);

	/*
	 * A workaround for KVM's bad exception handling.  If KVM injected an
	 * exception into L2, and L2 encountered a #PF while vectoring the
	 * injected exception, manually check to see if L1 wants to intercept
	 * #PF, otherwise queuing the #PF will lead to #DF or a lost exception.
	 * In all other cases, defer the check to nested_ops->check_events(),
	 * which will correctly handle priority (this does not).  Note, other
	 * exceptions, e.g. #GP, are theoretically affected, #PF is simply the
	 * most problematic, e.g. when L0 and L1 are both intercepting #PF for
	 * shadow paging.
	 *
	 * TODO: Rewrite exception handling to track injected and pending
	 *       (VM-Exit) exceptions separately.
	 */
	if (unlikely(vcpu->arch.exception.injected && is_guest_mode(vcpu)) &&
	    kvm_x86_ops.nested_ops->handle_page_fault_workaround(vcpu, fault))
		return true;

	fault_mmu->inject_page_fault(vcpu, fault);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);

static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
}
/*
 * Load the pae pdptrs.  Return 1 if they are all valid, 0 otherwise.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	gpa_t real_gpa;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	/*
	 * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
	 * to an L1 GPA.
	 */
	real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
				     PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	/* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */
	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
				       cr3 & GENMASK(11, 5), sizeof(pdpte));
	if (ret < 0)
		return 0;

	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & PT_PRESENT_MASK) &&
		    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
			return 0;
		}
	}

	/*
	 * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
	 * Shadow page roots need to be reconstructed instead.
	 */
	if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
		kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
	vcpu->arch.pdptrs_from_userspace = false;

	return 1;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);

		/*
		 * Clearing CR0.PG is defined to flush the TLB from the guest's
		 * perspective.
		 */
		if (!(cr0 & X86_CR0_PG))
			kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
	}

	if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr0);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

#ifdef CONFIG_X86_64
	if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
	    (cr0 & X86_CR0_PG)) {
		int cs_db, cs_l;

		if (!is_pae(vcpu))
			return 1;
		static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
		if (cs_l)
			return 1;
	}
#endif
	if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
	    is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
	    !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
		return 1;

	if (!(cr0 & X86_CR0_PG) &&
	    (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
		return 1;

	static_call(kvm_x86_set_cr0)(vcpu, cr0);

	kvm_post_set_cr0(vcpu, old_cr0, cr0);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
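/*
 * Switch XSAVE-related state (XCR0, IA32_XSS, PKRU) to the guest's values
 * before entering the guest; kvm_load_host_xsave_state() below restores the
 * host values on exit.  Both are no-ops when the guest's register state is
 * protected (e.g. SEV-ES), where the switch is not KVM's responsibility.
 */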
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
	}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    vcpu->arch.pkru != vcpu->arch.host_pkru &&
	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
	     kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
		write_pkru(vcpu->arch.pkru);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);

void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
	     kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
		vcpu->arch.pkru = rdpkru();
		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
			write_pkru(vcpu->arch.host_pkru);
	}
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, host_xss);
	}

}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);

static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
}

#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
{
	return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
}
#endif

static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0 = xcr;
	u64 old_xcr0 = vcpu->arch.xcr0;
	u64 valid_bits;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	if (!(xcr0 & XFEATURE_MASK_FP))
		return 1;
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
		return 1;

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving.  However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
1045 */ 1046 valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP; 1047 if (xcr0 & ~valid_bits) 1048 return 1; 1049 1050 if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) != 1051 (!(xcr0 & XFEATURE_MASK_BNDCSR))) 1052 return 1; 1053 1054 if (xcr0 & XFEATURE_MASK_AVX512) { 1055 if (!(xcr0 & XFEATURE_MASK_YMM)) 1056 return 1; 1057 if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512) 1058 return 1; 1059 } 1060 1061 if ((xcr0 & XFEATURE_MASK_XTILE) && 1062 ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE)) 1063 return 1; 1064 1065 vcpu->arch.xcr0 = xcr0; 1066 1067 if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND) 1068 kvm_update_cpuid_runtime(vcpu); 1069 return 0; 1070 } 1071 1072 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) 1073 { 1074 if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || 1075 __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { 1076 kvm_inject_gp(vcpu, 0); 1077 return 1; 1078 } 1079 1080 return kvm_skip_emulated_instruction(vcpu); 1081 } 1082 EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv); 1083 1084 bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1085 { 1086 if (cr4 & cr4_reserved_bits) 1087 return false; 1088 1089 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) 1090 return false; 1091 1092 return static_call(kvm_x86_is_valid_cr4)(vcpu, cr4); 1093 } 1094 EXPORT_SYMBOL_GPL(kvm_is_valid_cr4); 1095 1096 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4) 1097 { 1098 if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) 1099 kvm_mmu_reset_context(vcpu); 1100 1101 /* 1102 * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB 1103 * according to the SDM; however, stale prev_roots could be reused 1104 * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we 1105 * free them all. This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST 1106 * or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not flushed, 1107 * so fall through. 1108 */ 1109 if (!tdp_enabled && 1110 (cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) 1111 kvm_mmu_unload(vcpu); 1112 1113 /* 1114 * The TLB has to be flushed for all PCIDs if any of the following 1115 * (architecturally required) changes happen: 1116 * - CR4.PCIDE is changed from 1 to 0 1117 * - CR4.PGE is toggled 1118 * 1119 * This is a superset of KVM_REQ_TLB_FLUSH_CURRENT. 
1120 */ 1121 if (((cr4 ^ old_cr4) & X86_CR4_PGE) || 1122 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) 1123 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 1124 1125 /* 1126 * The TLB has to be flushed for the current PCID if any of the 1127 * following (architecturally required) changes happen: 1128 * - CR4.SMEP is changed from 0 to 1 1129 * - CR4.PAE is toggled 1130 */ 1131 else if (((cr4 ^ old_cr4) & X86_CR4_PAE) || 1132 ((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP))) 1133 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 1134 1135 } 1136 EXPORT_SYMBOL_GPL(kvm_post_set_cr4); 1137 1138 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1139 { 1140 unsigned long old_cr4 = kvm_read_cr4(vcpu); 1141 1142 if (!kvm_is_valid_cr4(vcpu, cr4)) 1143 return 1; 1144 1145 if (is_long_mode(vcpu)) { 1146 if (!(cr4 & X86_CR4_PAE)) 1147 return 1; 1148 if ((cr4 ^ old_cr4) & X86_CR4_LA57) 1149 return 1; 1150 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) 1151 && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS) 1152 && !load_pdptrs(vcpu, kvm_read_cr3(vcpu))) 1153 return 1; 1154 1155 if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { 1156 if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID)) 1157 return 1; 1158 1159 /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ 1160 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) 1161 return 1; 1162 } 1163 1164 static_call(kvm_x86_set_cr4)(vcpu, cr4); 1165 1166 kvm_post_set_cr4(vcpu, old_cr4, cr4); 1167 1168 return 0; 1169 } 1170 EXPORT_SYMBOL_GPL(kvm_set_cr4); 1171 1172 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) 1173 { 1174 struct kvm_mmu *mmu = vcpu->arch.mmu; 1175 unsigned long roots_to_free = 0; 1176 int i; 1177 1178 /* 1179 * MOV CR3 and INVPCID are usually not intercepted when using TDP, but 1180 * this is reachable when running EPT=1 and unrestricted_guest=0, and 1181 * also via the emulator. KVM's TDP page tables are not in the scope of 1182 * the invalidation, but the guest's TLB entries need to be flushed as 1183 * the CPU may have cached entries in its TLB for the target PCID. 1184 */ 1185 if (unlikely(tdp_enabled)) { 1186 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 1187 return; 1188 } 1189 1190 /* 1191 * If neither the current CR3 nor any of the prev_roots use the given 1192 * PCID, then nothing needs to be done here because a resync will 1193 * happen anyway before switching to any other CR3. 1194 */ 1195 if (kvm_get_active_pcid(vcpu) == pcid) { 1196 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); 1197 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 1198 } 1199 1200 /* 1201 * If PCID is disabled, there is no need to free prev_roots even if the 1202 * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB 1203 * with PCIDE=0. 
1204 */ 1205 if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) 1206 return; 1207 1208 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) 1209 if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid) 1210 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 1211 1212 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); 1213 } 1214 1215 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 1216 { 1217 bool skip_tlb_flush = false; 1218 unsigned long pcid = 0; 1219 #ifdef CONFIG_X86_64 1220 bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); 1221 1222 if (pcid_enabled) { 1223 skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH; 1224 cr3 &= ~X86_CR3_PCID_NOFLUSH; 1225 pcid = cr3 & X86_CR3_PCID_MASK; 1226 } 1227 #endif 1228 1229 /* PDPTRs are always reloaded for PAE paging. */ 1230 if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu)) 1231 goto handle_tlb_flush; 1232 1233 /* 1234 * Do not condition the GPA check on long mode, this helper is used to 1235 * stuff CR3, e.g. for RSM emulation, and there is no guarantee that 1236 * the current vCPU mode is accurate. 1237 */ 1238 if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) 1239 return 1; 1240 1241 if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3)) 1242 return 1; 1243 1244 if (cr3 != kvm_read_cr3(vcpu)) 1245 kvm_mmu_new_pgd(vcpu, cr3); 1246 1247 vcpu->arch.cr3 = cr3; 1248 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 1249 /* Do not call post_set_cr3, we do not get here for confidential guests. */ 1250 1251 handle_tlb_flush: 1252 /* 1253 * A load of CR3 that flushes the TLB flushes only the current PCID, 1254 * even if PCID is disabled, in which case PCID=0 is flushed. It's a 1255 * moot point in the end because _disabling_ PCID will flush all PCIDs, 1256 * and it's impossible to use a non-zero PCID when PCID is disabled, 1257 * i.e. only PCID=0 can be relevant. 
1258 */ 1259 if (!skip_tlb_flush) 1260 kvm_invalidate_pcid(vcpu, pcid); 1261 1262 return 0; 1263 } 1264 EXPORT_SYMBOL_GPL(kvm_set_cr3); 1265 1266 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) 1267 { 1268 if (cr8 & CR8_RESERVED_BITS) 1269 return 1; 1270 if (lapic_in_kernel(vcpu)) 1271 kvm_lapic_set_tpr(vcpu, cr8); 1272 else 1273 vcpu->arch.cr8 = cr8; 1274 return 0; 1275 } 1276 EXPORT_SYMBOL_GPL(kvm_set_cr8); 1277 1278 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) 1279 { 1280 if (lapic_in_kernel(vcpu)) 1281 return kvm_lapic_get_cr8(vcpu); 1282 else 1283 return vcpu->arch.cr8; 1284 } 1285 EXPORT_SYMBOL_GPL(kvm_get_cr8); 1286 1287 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) 1288 { 1289 int i; 1290 1291 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { 1292 for (i = 0; i < KVM_NR_DB_REGS; i++) 1293 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 1294 } 1295 } 1296 1297 void kvm_update_dr7(struct kvm_vcpu *vcpu) 1298 { 1299 unsigned long dr7; 1300 1301 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 1302 dr7 = vcpu->arch.guest_debug_dr7; 1303 else 1304 dr7 = vcpu->arch.dr7; 1305 static_call(kvm_x86_set_dr7)(vcpu, dr7); 1306 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; 1307 if (dr7 & DR7_BP_EN_MASK) 1308 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; 1309 } 1310 EXPORT_SYMBOL_GPL(kvm_update_dr7); 1311 1312 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) 1313 { 1314 u64 fixed = DR6_FIXED_1; 1315 1316 if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM)) 1317 fixed |= DR6_RTM; 1318 1319 if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)) 1320 fixed |= DR6_BUS_LOCK; 1321 return fixed; 1322 } 1323 1324 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) 1325 { 1326 size_t size = ARRAY_SIZE(vcpu->arch.db); 1327 1328 switch (dr) { 1329 case 0 ... 3: 1330 vcpu->arch.db[array_index_nospec(dr, size)] = val; 1331 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) 1332 vcpu->arch.eff_db[dr] = val; 1333 break; 1334 case 4: 1335 case 6: 1336 if (!kvm_dr6_valid(val)) 1337 return 1; /* #GP */ 1338 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); 1339 break; 1340 case 5: 1341 default: /* 7 */ 1342 if (!kvm_dr7_valid(val)) 1343 return 1; /* #GP */ 1344 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; 1345 kvm_update_dr7(vcpu); 1346 break; 1347 } 1348 1349 return 0; 1350 } 1351 EXPORT_SYMBOL_GPL(kvm_set_dr); 1352 1353 void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) 1354 { 1355 size_t size = ARRAY_SIZE(vcpu->arch.db); 1356 1357 switch (dr) { 1358 case 0 ... 3: 1359 *val = vcpu->arch.db[array_index_nospec(dr, size)]; 1360 break; 1361 case 4: 1362 case 6: 1363 *val = vcpu->arch.dr6; 1364 break; 1365 case 5: 1366 default: /* 7 */ 1367 *val = vcpu->arch.dr7; 1368 break; 1369 } 1370 } 1371 EXPORT_SYMBOL_GPL(kvm_get_dr); 1372 1373 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu) 1374 { 1375 u32 ecx = kvm_rcx_read(vcpu); 1376 u64 data; 1377 1378 if (kvm_pmu_rdpmc(vcpu, ecx, &data)) { 1379 kvm_inject_gp(vcpu, 0); 1380 return 1; 1381 } 1382 1383 kvm_rax_write(vcpu, (u32)data); 1384 kvm_rdx_write(vcpu, data >> 32); 1385 return kvm_skip_emulated_instruction(vcpu); 1386 } 1387 EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc); 1388 1389 /* 1390 * List of msr numbers which we expose to userspace through KVM_GET_MSRS 1391 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. 1392 * 1393 * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) 1394 * extract the supported MSRs from the related const lists. 
1395 * msrs_to_save is selected from the msrs_to_save_all to reflect the 1396 * capabilities of the host cpu. This capabilities test skips MSRs that are 1397 * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs 1398 * may depend on host virtualization features rather than host cpu features. 1399 */ 1400 1401 static const u32 msrs_to_save_all[] = { 1402 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, 1403 MSR_STAR, 1404 #ifdef CONFIG_X86_64 1405 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, 1406 #endif 1407 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, 1408 MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, 1409 MSR_IA32_SPEC_CTRL, 1410 MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, 1411 MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, 1412 MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, 1413 MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B, 1414 MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, 1415 MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, 1416 MSR_IA32_UMWAIT_CONTROL, 1417 1418 MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, 1419 MSR_ARCH_PERFMON_FIXED_CTR0 + 2, 1420 MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, 1421 MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 1422 MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, 1423 MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3, 1424 MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5, 1425 MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7, 1426 MSR_ARCH_PERFMON_PERFCTR0 + 8, MSR_ARCH_PERFMON_PERFCTR0 + 9, 1427 MSR_ARCH_PERFMON_PERFCTR0 + 10, MSR_ARCH_PERFMON_PERFCTR0 + 11, 1428 MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13, 1429 MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15, 1430 MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17, 1431 MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1, 1432 MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3, 1433 MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5, 1434 MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7, 1435 MSR_ARCH_PERFMON_EVENTSEL0 + 8, MSR_ARCH_PERFMON_EVENTSEL0 + 9, 1436 MSR_ARCH_PERFMON_EVENTSEL0 + 10, MSR_ARCH_PERFMON_EVENTSEL0 + 11, 1437 MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13, 1438 MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15, 1439 MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17, 1440 MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG, 1441 1442 MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3, 1443 MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3, 1444 MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2, 1445 MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5, 1446 MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2, 1447 MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5, 1448 MSR_IA32_XFD, MSR_IA32_XFD_ERR, 1449 }; 1450 1451 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)]; 1452 static unsigned num_msrs_to_save; 1453 1454 static const u32 emulated_msrs_all[] = { 1455 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, 1456 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, 1457 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, 1458 HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, 1459 HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY, 1460 HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, 1461 
HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, 1462 HV_X64_MSR_RESET, 1463 HV_X64_MSR_VP_INDEX, 1464 HV_X64_MSR_VP_RUNTIME, 1465 HV_X64_MSR_SCONTROL, 1466 HV_X64_MSR_STIMER0_CONFIG, 1467 HV_X64_MSR_VP_ASSIST_PAGE, 1468 HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL, 1469 HV_X64_MSR_TSC_EMULATION_STATUS, 1470 HV_X64_MSR_SYNDBG_OPTIONS, 1471 HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS, 1472 HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER, 1473 HV_X64_MSR_SYNDBG_PENDING_BUFFER, 1474 1475 MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, 1476 MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK, 1477 1478 MSR_IA32_TSC_ADJUST, 1479 MSR_IA32_TSC_DEADLINE, 1480 MSR_IA32_ARCH_CAPABILITIES, 1481 MSR_IA32_PERF_CAPABILITIES, 1482 MSR_IA32_MISC_ENABLE, 1483 MSR_IA32_MCG_STATUS, 1484 MSR_IA32_MCG_CTL, 1485 MSR_IA32_MCG_EXT_CTL, 1486 MSR_IA32_SMBASE, 1487 MSR_SMI_COUNT, 1488 MSR_PLATFORM_INFO, 1489 MSR_MISC_FEATURES_ENABLES, 1490 MSR_AMD64_VIRT_SPEC_CTRL, 1491 MSR_AMD64_TSC_RATIO, 1492 MSR_IA32_POWER_CTL, 1493 MSR_IA32_UCODE_REV, 1494 1495 /* 1496 * The following list leaves out MSRs whose values are determined 1497 * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs. 1498 * We always support the "true" VMX control MSRs, even if the host 1499 * processor does not, so I am putting these registers here rather 1500 * than in msrs_to_save_all. 1501 */ 1502 MSR_IA32_VMX_BASIC, 1503 MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1504 MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1505 MSR_IA32_VMX_TRUE_EXIT_CTLS, 1506 MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1507 MSR_IA32_VMX_MISC, 1508 MSR_IA32_VMX_CR0_FIXED0, 1509 MSR_IA32_VMX_CR4_FIXED0, 1510 MSR_IA32_VMX_VMCS_ENUM, 1511 MSR_IA32_VMX_PROCBASED_CTLS2, 1512 MSR_IA32_VMX_EPT_VPID_CAP, 1513 MSR_IA32_VMX_VMFUNC, 1514 1515 MSR_K7_HWCR, 1516 MSR_KVM_POLL_CONTROL, 1517 }; 1518 1519 static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; 1520 static unsigned num_emulated_msrs; 1521 1522 /* 1523 * List of msr numbers which are used to expose MSR-based features that 1524 * can be used by a hypervisor to validate requested CPU features. 1525 */ 1526 static const u32 msr_based_features_all[] = { 1527 MSR_IA32_VMX_BASIC, 1528 MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1529 MSR_IA32_VMX_PINBASED_CTLS, 1530 MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1531 MSR_IA32_VMX_PROCBASED_CTLS, 1532 MSR_IA32_VMX_TRUE_EXIT_CTLS, 1533 MSR_IA32_VMX_EXIT_CTLS, 1534 MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1535 MSR_IA32_VMX_ENTRY_CTLS, 1536 MSR_IA32_VMX_MISC, 1537 MSR_IA32_VMX_CR0_FIXED0, 1538 MSR_IA32_VMX_CR0_FIXED1, 1539 MSR_IA32_VMX_CR4_FIXED0, 1540 MSR_IA32_VMX_CR4_FIXED1, 1541 MSR_IA32_VMX_VMCS_ENUM, 1542 MSR_IA32_VMX_PROCBASED_CTLS2, 1543 MSR_IA32_VMX_EPT_VPID_CAP, 1544 MSR_IA32_VMX_VMFUNC, 1545 1546 MSR_F10H_DECFG, 1547 MSR_IA32_UCODE_REV, 1548 MSR_IA32_ARCH_CAPABILITIES, 1549 MSR_IA32_PERF_CAPABILITIES, 1550 }; 1551 1552 static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)]; 1553 static unsigned int num_msr_based_features; 1554 1555 static u64 kvm_get_arch_capabilities(void) 1556 { 1557 u64 data = 0; 1558 1559 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) 1560 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data); 1561 1562 /* 1563 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that 1564 * the nested hypervisor runs with NX huge pages. If it is not, 1565 * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other 1566 * L1 guests, so it need not worry about its own (L2) guests. 
1567 */ 1568 data |= ARCH_CAP_PSCHANGE_MC_NO; 1569 1570 /* 1571 * If we're doing cache flushes (either "always" or "cond") 1572 * we will do one whenever the guest does a vmlaunch/vmresume. 1573 * If an outer hypervisor is doing the cache flush for us 1574 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that 1575 * capability to the guest too, and if EPT is disabled we're not 1576 * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will 1577 * require a nested hypervisor to do a flush of its own. 1578 */ 1579 if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER) 1580 data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH; 1581 1582 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) 1583 data |= ARCH_CAP_RDCL_NO; 1584 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 1585 data |= ARCH_CAP_SSB_NO; 1586 if (!boot_cpu_has_bug(X86_BUG_MDS)) 1587 data |= ARCH_CAP_MDS_NO; 1588 1589 if (!boot_cpu_has(X86_FEATURE_RTM)) { 1590 /* 1591 * If RTM=0 because the kernel has disabled TSX, the host might 1592 * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0 1593 * and therefore knows that there cannot be TAA) but keep 1594 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts, 1595 * and we want to allow migrating those guests to tsx=off hosts. 1596 */ 1597 data &= ~ARCH_CAP_TAA_NO; 1598 } else if (!boot_cpu_has_bug(X86_BUG_TAA)) { 1599 data |= ARCH_CAP_TAA_NO; 1600 } else { 1601 /* 1602 * Nothing to do here; we emulate TSX_CTRL if present on the 1603 * host so the guest can choose between disabling TSX or 1604 * using VERW to clear CPU buffers. 1605 */ 1606 } 1607 1608 return data; 1609 } 1610 1611 static int kvm_get_msr_feature(struct kvm_msr_entry *msr) 1612 { 1613 switch (msr->index) { 1614 case MSR_IA32_ARCH_CAPABILITIES: 1615 msr->data = kvm_get_arch_capabilities(); 1616 break; 1617 case MSR_IA32_UCODE_REV: 1618 rdmsrl_safe(msr->index, &msr->data); 1619 break; 1620 default: 1621 return static_call(kvm_x86_get_msr_feature)(msr); 1622 } 1623 return 0; 1624 } 1625 1626 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 1627 { 1628 struct kvm_msr_entry msr; 1629 int r; 1630 1631 msr.index = index; 1632 r = kvm_get_msr_feature(&msr); 1633 1634 if (r == KVM_MSR_RET_INVALID) { 1635 /* Unconditionally clear the output for simplicity */ 1636 *data = 0; 1637 if (kvm_msr_ignored_check(index, 0, false)) 1638 r = 0; 1639 } 1640 1641 if (r) 1642 return r; 1643 1644 *data = msr.data; 1645 1646 return 0; 1647 } 1648 1649 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) 1650 { 1651 if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) 1652 return false; 1653 1654 if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) 1655 return false; 1656 1657 if (efer & (EFER_LME | EFER_LMA) && 1658 !guest_cpuid_has(vcpu, X86_FEATURE_LM)) 1659 return false; 1660 1661 if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX)) 1662 return false; 1663 1664 return true; 1665 1666 } 1667 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) 1668 { 1669 if (efer & efer_reserved_bits) 1670 return false; 1671 1672 return __kvm_valid_efer(vcpu, efer); 1673 } 1674 EXPORT_SYMBOL_GPL(kvm_valid_efer); 1675 1676 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1677 { 1678 u64 old_efer = vcpu->arch.efer; 1679 u64 efer = msr_info->data; 1680 int r; 1681 1682 if (efer & efer_reserved_bits) 1683 return 1; 1684 1685 if (!msr_info->host_initiated) { 1686 if (!__kvm_valid_efer(vcpu, efer)) 1687 return 1; 1688 1689 if (is_paging(vcpu) && 1690 (vcpu->arch.efer & 
EFER_LME) != (efer & EFER_LME)) 1691 return 1; 1692 } 1693 1694 efer &= ~EFER_LMA; 1695 efer |= vcpu->arch.efer & EFER_LMA; 1696 1697 r = static_call(kvm_x86_set_efer)(vcpu, efer); 1698 if (r) { 1699 WARN_ON(r > 0); 1700 return r; 1701 } 1702 1703 if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS) 1704 kvm_mmu_reset_context(vcpu); 1705 1706 return 0; 1707 } 1708 1709 void kvm_enable_efer_bits(u64 mask) 1710 { 1711 efer_reserved_bits &= ~mask; 1712 } 1713 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); 1714 1715 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type) 1716 { 1717 struct kvm_x86_msr_filter *msr_filter; 1718 struct msr_bitmap_range *ranges; 1719 struct kvm *kvm = vcpu->kvm; 1720 bool allowed; 1721 int idx; 1722 u32 i; 1723 1724 /* x2APIC MSRs do not support filtering. */ 1725 if (index >= 0x800 && index <= 0x8ff) 1726 return true; 1727 1728 idx = srcu_read_lock(&kvm->srcu); 1729 1730 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); 1731 if (!msr_filter) { 1732 allowed = true; 1733 goto out; 1734 } 1735 1736 allowed = msr_filter->default_allow; 1737 ranges = msr_filter->ranges; 1738 1739 for (i = 0; i < msr_filter->count; i++) { 1740 u32 start = ranges[i].base; 1741 u32 end = start + ranges[i].nmsrs; 1742 u32 flags = ranges[i].flags; 1743 unsigned long *bitmap = ranges[i].bitmap; 1744 1745 if ((index >= start) && (index < end) && (flags & type)) { 1746 allowed = !!test_bit(index - start, bitmap); 1747 break; 1748 } 1749 } 1750 1751 out: 1752 srcu_read_unlock(&kvm->srcu, idx); 1753 1754 return allowed; 1755 } 1756 EXPORT_SYMBOL_GPL(kvm_msr_allowed); 1757 1758 /* 1759 * Write @data into the MSR specified by @index. Select MSR specific fault 1760 * checks are bypassed if @host_initiated is %true. 1761 * Returns 0 on success, non-0 otherwise. 1762 * Assumes vcpu_load() was already called. 1763 */ 1764 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, 1765 bool host_initiated) 1766 { 1767 struct msr_data msr; 1768 1769 switch (index) { 1770 case MSR_FS_BASE: 1771 case MSR_GS_BASE: 1772 case MSR_KERNEL_GS_BASE: 1773 case MSR_CSTAR: 1774 case MSR_LSTAR: 1775 if (is_noncanonical_address(data, vcpu)) 1776 return 1; 1777 break; 1778 case MSR_IA32_SYSENTER_EIP: 1779 case MSR_IA32_SYSENTER_ESP: 1780 /* 1781 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if 1782 * non-canonical address is written on Intel but not on 1783 * AMD (which ignores the top 32-bits, because it does 1784 * not implement 64-bit SYSENTER). 1785 * 1786 * 64-bit code should hence be able to write a non-canonical 1787 * value on AMD. Making the address canonical ensures that 1788 * vmentry does not fail on Intel after writing a non-canonical 1789 * value, and that something deterministic happens if the guest 1790 * invokes 64-bit SYSENTER. 1791 */ 1792 data = __canonical_address(data, vcpu_virt_addr_bits(vcpu)); 1793 break; 1794 case MSR_TSC_AUX: 1795 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) 1796 return 1; 1797 1798 if (!host_initiated && 1799 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && 1800 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) 1801 return 1; 1802 1803 /* 1804 * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has 1805 * incomplete and conflicting architectural behavior. Current 1806 * AMD CPUs completely ignore bits 63:32, i.e. they aren't 1807 * reserved and always read as zeros. Enforce Intel's reserved 1808 * bits check if and only if the guest CPU is Intel, and clear 1809 * the bits in all other cases. 
This ensures cross-vendor 1810 * migration will provide consistent behavior for the guest. 1811 */ 1812 if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0) 1813 return 1; 1814 1815 data = (u32)data; 1816 break; 1817 } 1818 1819 msr.data = data; 1820 msr.index = index; 1821 msr.host_initiated = host_initiated; 1822 1823 return static_call(kvm_x86_set_msr)(vcpu, &msr); 1824 } 1825 1826 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, 1827 u32 index, u64 data, bool host_initiated) 1828 { 1829 int ret = __kvm_set_msr(vcpu, index, data, host_initiated); 1830 1831 if (ret == KVM_MSR_RET_INVALID) 1832 if (kvm_msr_ignored_check(index, data, true)) 1833 ret = 0; 1834 1835 return ret; 1836 } 1837 1838 /* 1839 * Read the MSR specified by @index into @data. Select MSR specific fault 1840 * checks are bypassed if @host_initiated is %true. 1841 * Returns 0 on success, non-0 otherwise. 1842 * Assumes vcpu_load() was already called. 1843 */ 1844 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, 1845 bool host_initiated) 1846 { 1847 struct msr_data msr; 1848 int ret; 1849 1850 switch (index) { 1851 case MSR_TSC_AUX: 1852 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) 1853 return 1; 1854 1855 if (!host_initiated && 1856 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && 1857 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) 1858 return 1; 1859 break; 1860 } 1861 1862 msr.index = index; 1863 msr.host_initiated = host_initiated; 1864 1865 ret = static_call(kvm_x86_get_msr)(vcpu, &msr); 1866 if (!ret) 1867 *data = msr.data; 1868 return ret; 1869 } 1870 1871 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, 1872 u32 index, u64 *data, bool host_initiated) 1873 { 1874 int ret = __kvm_get_msr(vcpu, index, data, host_initiated); 1875 1876 if (ret == KVM_MSR_RET_INVALID) { 1877 /* Unconditionally clear *data for simplicity */ 1878 *data = 0; 1879 if (kvm_msr_ignored_check(index, 0, false)) 1880 ret = 0; 1881 } 1882 1883 return ret; 1884 } 1885 1886 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1887 { 1888 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) 1889 return KVM_MSR_RET_FILTERED; 1890 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1891 } 1892 1893 static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data) 1894 { 1895 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) 1896 return KVM_MSR_RET_FILTERED; 1897 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1898 } 1899 1900 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1901 { 1902 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1903 } 1904 EXPORT_SYMBOL_GPL(kvm_get_msr); 1905 1906 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) 1907 { 1908 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1909 } 1910 EXPORT_SYMBOL_GPL(kvm_set_msr); 1911 1912 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu) 1913 { 1914 if (!vcpu->run->msr.error) { 1915 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); 1916 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); 1917 } 1918 } 1919 1920 static int complete_emulated_msr_access(struct kvm_vcpu *vcpu) 1921 { 1922 return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error); 1923 } 1924 1925 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) 1926 { 1927 complete_userspace_rdmsr(vcpu); 1928 return complete_emulated_msr_access(vcpu); 1929 } 1930 1931 static int complete_fast_msr_access(struct kvm_vcpu *vcpu) 1932 { 1933 return 
static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); 1934 } 1935 1936 static int complete_fast_rdmsr(struct kvm_vcpu *vcpu) 1937 { 1938 complete_userspace_rdmsr(vcpu); 1939 return complete_fast_msr_access(vcpu); 1940 } 1941 1942 static u64 kvm_msr_reason(int r) 1943 { 1944 switch (r) { 1945 case KVM_MSR_RET_INVALID: 1946 return KVM_MSR_EXIT_REASON_UNKNOWN; 1947 case KVM_MSR_RET_FILTERED: 1948 return KVM_MSR_EXIT_REASON_FILTER; 1949 default: 1950 return KVM_MSR_EXIT_REASON_INVAL; 1951 } 1952 } 1953 1954 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, 1955 u32 exit_reason, u64 data, 1956 int (*completion)(struct kvm_vcpu *vcpu), 1957 int r) 1958 { 1959 u64 msr_reason = kvm_msr_reason(r); 1960 1961 /* Check if the user wanted to know about this MSR fault */ 1962 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) 1963 return 0; 1964 1965 vcpu->run->exit_reason = exit_reason; 1966 vcpu->run->msr.error = 0; 1967 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); 1968 vcpu->run->msr.reason = msr_reason; 1969 vcpu->run->msr.index = index; 1970 vcpu->run->msr.data = data; 1971 vcpu->arch.complete_userspace_io = completion; 1972 1973 return 1; 1974 } 1975 1976 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) 1977 { 1978 u32 ecx = kvm_rcx_read(vcpu); 1979 u64 data; 1980 int r; 1981 1982 r = kvm_get_msr_with_filter(vcpu, ecx, &data); 1983 1984 if (!r) { 1985 trace_kvm_msr_read(ecx, data); 1986 1987 kvm_rax_write(vcpu, data & -1u); 1988 kvm_rdx_write(vcpu, (data >> 32) & -1u); 1989 } else { 1990 /* MSR read failed? See if we should ask user space */ 1991 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0, 1992 complete_fast_rdmsr, r)) 1993 return 0; 1994 trace_kvm_msr_read_ex(ecx); 1995 } 1996 1997 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 1998 } 1999 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); 2000 2001 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) 2002 { 2003 u32 ecx = kvm_rcx_read(vcpu); 2004 u64 data = kvm_read_edx_eax(vcpu); 2005 int r; 2006 2007 r = kvm_set_msr_with_filter(vcpu, ecx, data); 2008 2009 if (!r) { 2010 trace_kvm_msr_write(ecx, data); 2011 } else { 2012 /* MSR write failed? See if we should ask user space */ 2013 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data, 2014 complete_fast_msr_access, r)) 2015 return 0; 2016 /* Signal all other negative errors to userspace */ 2017 if (r < 0) 2018 return r; 2019 trace_kvm_msr_write_ex(ecx, data); 2020 } 2021 2022 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 2023 } 2024 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); 2025 2026 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) 2027 { 2028 return kvm_skip_emulated_instruction(vcpu); 2029 } 2030 EXPORT_SYMBOL_GPL(kvm_emulate_as_nop); 2031 2032 int kvm_emulate_invd(struct kvm_vcpu *vcpu) 2033 { 2034 /* Treat an INVD instruction as a NOP and just skip it. 
*/ 2035 return kvm_emulate_as_nop(vcpu); 2036 } 2037 EXPORT_SYMBOL_GPL(kvm_emulate_invd); 2038 2039 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) 2040 { 2041 kvm_queue_exception(vcpu, UD_VECTOR); 2042 return 1; 2043 } 2044 EXPORT_SYMBOL_GPL(kvm_handle_invalid_op); 2045 2046 2047 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn) 2048 { 2049 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_FAULTS) && 2050 !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT)) 2051 return kvm_handle_invalid_op(vcpu); 2052 2053 pr_warn_once("kvm: %s instruction emulated as NOP!\n", insn); 2054 return kvm_emulate_as_nop(vcpu); 2055 } 2056 int kvm_emulate_mwait(struct kvm_vcpu *vcpu) 2057 { 2058 return kvm_emulate_monitor_mwait(vcpu, "MWAIT"); 2059 } 2060 EXPORT_SYMBOL_GPL(kvm_emulate_mwait); 2061 2062 int kvm_emulate_monitor(struct kvm_vcpu *vcpu) 2063 { 2064 return kvm_emulate_monitor_mwait(vcpu, "MONITOR"); 2065 } 2066 EXPORT_SYMBOL_GPL(kvm_emulate_monitor); 2067 2068 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) 2069 { 2070 xfer_to_guest_mode_prepare(); 2071 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || 2072 xfer_to_guest_mode_work_pending(); 2073 } 2074 2075 /* 2076 * The fast path for frequent and performance sensitive wrmsr emulation, 2077 * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces 2078 * the latency of virtual IPI by avoiding the expensive bits of transitioning 2079 * from guest to host, e.g. reacquiring KVM's SRCU lock. In contrast to the 2080 * other cases which must be called after interrupts are enabled on the host. 2081 */ 2082 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data) 2083 { 2084 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) 2085 return 1; 2086 2087 if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) && 2088 ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) && 2089 ((data & APIC_MODE_MASK) == APIC_DM_FIXED) && 2090 ((u32)(data >> 32) != X2APIC_BROADCAST)) 2091 return kvm_x2apic_icr_write(vcpu->arch.apic, data); 2092 2093 return 1; 2094 } 2095 2096 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data) 2097 { 2098 if (!kvm_can_use_hv_timer(vcpu)) 2099 return 1; 2100 2101 kvm_set_lapic_tscdeadline_msr(vcpu, data); 2102 return 0; 2103 } 2104 2105 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) 2106 { 2107 u32 msr = kvm_rcx_read(vcpu); 2108 u64 data; 2109 fastpath_t ret = EXIT_FASTPATH_NONE; 2110 2111 switch (msr) { 2112 case APIC_BASE_MSR + (APIC_ICR >> 4): 2113 data = kvm_read_edx_eax(vcpu); 2114 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) { 2115 kvm_skip_emulated_instruction(vcpu); 2116 ret = EXIT_FASTPATH_EXIT_HANDLED; 2117 } 2118 break; 2119 case MSR_IA32_TSC_DEADLINE: 2120 data = kvm_read_edx_eax(vcpu); 2121 if (!handle_fastpath_set_tscdeadline(vcpu, data)) { 2122 kvm_skip_emulated_instruction(vcpu); 2123 ret = EXIT_FASTPATH_REENTER_GUEST; 2124 } 2125 break; 2126 default: 2127 break; 2128 } 2129 2130 if (ret != EXIT_FASTPATH_NONE) 2131 trace_kvm_msr_write(msr, data); 2132 2133 return ret; 2134 } 2135 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff); 2136 2137 /* 2138 * Adapt set_msr() to msr_io()'s calling convention 2139 */ 2140 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 2141 { 2142 return kvm_get_msr_ignored_check(vcpu, index, data, true); 2143 } 2144 2145 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 2146 { 2147 return 
kvm_set_msr_ignored_check(vcpu, index, *data, true); 2148 } 2149 2150 #ifdef CONFIG_X86_64 2151 struct pvclock_clock { 2152 int vclock_mode; 2153 u64 cycle_last; 2154 u64 mask; 2155 u32 mult; 2156 u32 shift; 2157 u64 base_cycles; 2158 u64 offset; 2159 }; 2160 2161 struct pvclock_gtod_data { 2162 seqcount_t seq; 2163 2164 struct pvclock_clock clock; /* extract of a clocksource struct */ 2165 struct pvclock_clock raw_clock; /* extract of a clocksource struct */ 2166 2167 ktime_t offs_boot; 2168 u64 wall_time_sec; 2169 }; 2170 2171 static struct pvclock_gtod_data pvclock_gtod_data; 2172 2173 static void update_pvclock_gtod(struct timekeeper *tk) 2174 { 2175 struct pvclock_gtod_data *vdata = &pvclock_gtod_data; 2176 2177 write_seqcount_begin(&vdata->seq); 2178 2179 /* copy pvclock gtod data */ 2180 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; 2181 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; 2182 vdata->clock.mask = tk->tkr_mono.mask; 2183 vdata->clock.mult = tk->tkr_mono.mult; 2184 vdata->clock.shift = tk->tkr_mono.shift; 2185 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; 2186 vdata->clock.offset = tk->tkr_mono.base; 2187 2188 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; 2189 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; 2190 vdata->raw_clock.mask = tk->tkr_raw.mask; 2191 vdata->raw_clock.mult = tk->tkr_raw.mult; 2192 vdata->raw_clock.shift = tk->tkr_raw.shift; 2193 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; 2194 vdata->raw_clock.offset = tk->tkr_raw.base; 2195 2196 vdata->wall_time_sec = tk->xtime_sec; 2197 2198 vdata->offs_boot = tk->offs_boot; 2199 2200 write_seqcount_end(&vdata->seq); 2201 } 2202 2203 static s64 get_kvmclock_base_ns(void) 2204 { 2205 /* Count up from boot time, but with the frequency of the raw clock. */ 2206 return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot)); 2207 } 2208 #else 2209 static s64 get_kvmclock_base_ns(void) 2210 { 2211 /* Master clock not used, so we can just use CLOCK_BOOTTIME. */ 2212 return ktime_get_boottime_ns(); 2213 } 2214 #endif 2215 2216 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs) 2217 { 2218 int version; 2219 int r; 2220 struct pvclock_wall_clock wc; 2221 u32 wc_sec_hi; 2222 u64 wall_nsec; 2223 2224 if (!wall_clock) 2225 return; 2226 2227 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); 2228 if (r) 2229 return; 2230 2231 if (version & 1) 2232 ++version; /* first time write, random junk */ 2233 2234 ++version; 2235 2236 if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version))) 2237 return; 2238 2239 /* 2240 * The guest calculates current wall clock time by adding 2241 * system time (updated by kvm_guest_time_update below) to the 2242 * wall clock specified here. We do the reverse here. 
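 *
 * In other words, the value written below is roughly:
 *   wall_clock = ktime_get_real_ns() - get_kvmclock_ns(kvm)
 * so that wall_clock + kvmclock reproduces host real time in the guest.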
2243 */ 2244 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); 2245 2246 wc.nsec = do_div(wall_nsec, 1000000000); 2247 wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */ 2248 wc.version = version; 2249 2250 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); 2251 2252 if (sec_hi_ofs) { 2253 wc_sec_hi = wall_nsec >> 32; 2254 kvm_write_guest(kvm, wall_clock + sec_hi_ofs, 2255 &wc_sec_hi, sizeof(wc_sec_hi)); 2256 } 2257 2258 version++; 2259 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); 2260 } 2261 2262 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, 2263 bool old_msr, bool host_initiated) 2264 { 2265 struct kvm_arch *ka = &vcpu->kvm->arch; 2266 2267 if (vcpu->vcpu_id == 0 && !host_initiated) { 2268 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) 2269 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2270 2271 ka->boot_vcpu_runs_old_kvmclock = old_msr; 2272 } 2273 2274 vcpu->arch.time = system_time; 2275 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 2276 2277 /* we verify if the enable bit is set... */ 2278 if (system_time & 1) { 2279 kvm_gfn_to_pfn_cache_init(vcpu->kvm, &vcpu->arch.pv_time, vcpu, 2280 KVM_HOST_USES_PFN, system_time & ~1ULL, 2281 sizeof(struct pvclock_vcpu_time_info)); 2282 } else { 2283 kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time); 2284 } 2285 2286 return; 2287 } 2288 2289 static uint32_t div_frac(uint32_t dividend, uint32_t divisor) 2290 { 2291 do_shl32_div32(dividend, divisor); 2292 return dividend; 2293 } 2294 2295 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz, 2296 s8 *pshift, u32 *pmultiplier) 2297 { 2298 uint64_t scaled64; 2299 int32_t shift = 0; 2300 uint64_t tps64; 2301 uint32_t tps32; 2302 2303 tps64 = base_hz; 2304 scaled64 = scaled_hz; 2305 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { 2306 tps64 >>= 1; 2307 shift--; 2308 } 2309 2310 tps32 = (uint32_t)tps64; 2311 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { 2312 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) 2313 scaled64 >>= 1; 2314 else 2315 tps32 <<= 1; 2316 shift++; 2317 } 2318 2319 *pshift = shift; 2320 *pmultiplier = div_frac(scaled64, tps32); 2321 } 2322 2323 #ifdef CONFIG_X86_64 2324 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); 2325 #endif 2326 2327 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); 2328 static unsigned long max_tsc_khz; 2329 2330 static u32 adjust_tsc_khz(u32 khz, s32 ppm) 2331 { 2332 u64 v = (u64)khz * (1000000 + ppm); 2333 do_div(v, 1000000); 2334 return v; 2335 } 2336 2337 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier); 2338 2339 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) 2340 { 2341 u64 ratio; 2342 2343 /* Guest TSC same frequency as host TSC? */ 2344 if (!scale) { 2345 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); 2346 return 0; 2347 } 2348 2349 /* TSC scaling supported? 
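 * If hardware scaling is unavailable, fall back to catch-up mode when the
 * requested rate is above the host rate, and fail when it is below it.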
*/ 2350 if (!kvm_caps.has_tsc_control) { 2351 if (user_tsc_khz > tsc_khz) { 2352 vcpu->arch.tsc_catchup = 1; 2353 vcpu->arch.tsc_always_catchup = 1; 2354 return 0; 2355 } else { 2356 pr_warn_ratelimited("user requested TSC rate below hardware speed\n"); 2357 return -1; 2358 } 2359 } 2360 2361 /* TSC scaling required - calculate ratio */ 2362 ratio = mul_u64_u32_div(1ULL << kvm_caps.tsc_scaling_ratio_frac_bits, 2363 user_tsc_khz, tsc_khz); 2364 2365 if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) { 2366 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", 2367 user_tsc_khz); 2368 return -1; 2369 } 2370 2371 kvm_vcpu_write_tsc_multiplier(vcpu, ratio); 2372 return 0; 2373 } 2374 2375 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) 2376 { 2377 u32 thresh_lo, thresh_hi; 2378 int use_scaling = 0; 2379 2380 /* tsc_khz can be zero if TSC calibration fails */ 2381 if (user_tsc_khz == 0) { 2382 /* set tsc_scaling_ratio to a safe value */ 2383 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); 2384 return -1; 2385 } 2386 2387 /* Compute a scale to convert nanoseconds in TSC cycles */ 2388 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC, 2389 &vcpu->arch.virtual_tsc_shift, 2390 &vcpu->arch.virtual_tsc_mult); 2391 vcpu->arch.virtual_tsc_khz = user_tsc_khz; 2392 2393 /* 2394 * Compute the variation in TSC rate which is acceptable 2395 * within the range of tolerance and decide if the 2396 * rate being applied is within that bounds of the hardware 2397 * rate. If so, no scaling or compensation need be done. 2398 */ 2399 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); 2400 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); 2401 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) { 2402 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi); 2403 use_scaling = 1; 2404 } 2405 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); 2406 } 2407 2408 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) 2409 { 2410 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, 2411 vcpu->arch.virtual_tsc_mult, 2412 vcpu->arch.virtual_tsc_shift); 2413 tsc += vcpu->arch.this_tsc_write; 2414 return tsc; 2415 } 2416 2417 #ifdef CONFIG_X86_64 2418 static inline int gtod_is_based_on_tsc(int mode) 2419 { 2420 return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK; 2421 } 2422 #endif 2423 2424 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) 2425 { 2426 #ifdef CONFIG_X86_64 2427 bool vcpus_matched; 2428 struct kvm_arch *ka = &vcpu->kvm->arch; 2429 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2430 2431 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 2432 atomic_read(&vcpu->kvm->online_vcpus)); 2433 2434 /* 2435 * Once the masterclock is enabled, always perform request in 2436 * order to update it. 2437 * 2438 * In order to enable masterclock, the host clocksource must be TSC 2439 * and the vcpus need to have matched TSCs. When that happens, 2440 * perform request to enable masterclock. 2441 */ 2442 if (ka->use_master_clock || 2443 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) 2444 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2445 2446 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, 2447 atomic_read(&vcpu->kvm->online_vcpus), 2448 ka->use_master_clock, gtod->clock.vclock_mode); 2449 #endif 2450 } 2451 2452 /* 2453 * Multiply tsc by a fixed point number represented by ratio. 
2454 * 2455 * The most significant 64-N bits (mult) of ratio represent the 2456 * integral part of the fixed point number; the remaining N bits 2457 * (frac) represent the fractional part, ie. ratio represents a fixed 2458 * point number (mult + frac * 2^(-N)). 2459 * 2460 * N equals to kvm_caps.tsc_scaling_ratio_frac_bits. 2461 */ 2462 static inline u64 __scale_tsc(u64 ratio, u64 tsc) 2463 { 2464 return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits); 2465 } 2466 2467 u64 kvm_scale_tsc(u64 tsc, u64 ratio) 2468 { 2469 u64 _tsc = tsc; 2470 2471 if (ratio != kvm_caps.default_tsc_scaling_ratio) 2472 _tsc = __scale_tsc(ratio, tsc); 2473 2474 return _tsc; 2475 } 2476 EXPORT_SYMBOL_GPL(kvm_scale_tsc); 2477 2478 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) 2479 { 2480 u64 tsc; 2481 2482 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); 2483 2484 return target_tsc - tsc; 2485 } 2486 2487 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) 2488 { 2489 return vcpu->arch.l1_tsc_offset + 2490 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio); 2491 } 2492 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 2493 2494 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier) 2495 { 2496 u64 nested_offset; 2497 2498 if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio) 2499 nested_offset = l1_offset; 2500 else 2501 nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier, 2502 kvm_caps.tsc_scaling_ratio_frac_bits); 2503 2504 nested_offset += l2_offset; 2505 return nested_offset; 2506 } 2507 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset); 2508 2509 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier) 2510 { 2511 if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio) 2512 return mul_u64_u64_shr(l1_multiplier, l2_multiplier, 2513 kvm_caps.tsc_scaling_ratio_frac_bits); 2514 2515 return l1_multiplier; 2516 } 2517 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier); 2518 2519 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset) 2520 { 2521 trace_kvm_write_tsc_offset(vcpu->vcpu_id, 2522 vcpu->arch.l1_tsc_offset, 2523 l1_offset); 2524 2525 vcpu->arch.l1_tsc_offset = l1_offset; 2526 2527 /* 2528 * If we are here because L1 chose not to trap WRMSR to TSC then 2529 * according to the spec this should set L1's TSC (as opposed to 2530 * setting L1's offset for L2). 
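 * When L2 is active, fold L2's own TSC offset and multiplier into the
 * value programmed into hardware; otherwise use the L1 offset directly.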
2531 */ 2532 if (is_guest_mode(vcpu)) 2533 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( 2534 l1_offset, 2535 static_call(kvm_x86_get_l2_tsc_offset)(vcpu), 2536 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2537 else 2538 vcpu->arch.tsc_offset = l1_offset; 2539 2540 static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset); 2541 } 2542 2543 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier) 2544 { 2545 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; 2546 2547 /* Userspace is changing the multiplier while L2 is active */ 2548 if (is_guest_mode(vcpu)) 2549 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( 2550 l1_multiplier, 2551 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2552 else 2553 vcpu->arch.tsc_scaling_ratio = l1_multiplier; 2554 2555 if (kvm_caps.has_tsc_control) 2556 static_call(kvm_x86_write_tsc_multiplier)( 2557 vcpu, vcpu->arch.tsc_scaling_ratio); 2558 } 2559 2560 static inline bool kvm_check_tsc_unstable(void) 2561 { 2562 #ifdef CONFIG_X86_64 2563 /* 2564 * The TSC is marked unstable when running on Hyper-V, but the 2565 * 'TSC page' clocksource is still good. 2566 */ 2567 if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK) 2568 return false; 2569 #endif 2570 return check_tsc_unstable(); 2571 } 2572 2573 /* 2574 * Infers attempts to synchronize the guest's tsc from host writes. Sets the 2575 * offset for the vcpu and tracks the TSC matching generation that the vcpu 2576 * participates in. 2577 */ 2578 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc, 2579 u64 ns, bool matched) 2580 { 2581 struct kvm *kvm = vcpu->kvm; 2582 2583 lockdep_assert_held(&kvm->arch.tsc_write_lock); 2584 2585 /* 2586 * We also track the most recent recorded KHZ, write and time to 2587 * allow the matching interval to be extended at each write. 2588 */ 2589 kvm->arch.last_tsc_nsec = ns; 2590 kvm->arch.last_tsc_write = tsc; 2591 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; 2592 kvm->arch.last_tsc_offset = offset; 2593 2594 vcpu->arch.last_guest_tsc = tsc; 2595 2596 kvm_vcpu_write_tsc_offset(vcpu, offset); 2597 2598 if (!matched) { 2599 /* 2600 * We split periods of matched TSC writes into generations. 2601 * For each generation, we track the original measured 2602 * nanosecond time, offset, and write, so if TSCs are in 2603 * sync, we can match exact offset, and if not, we can match 2604 * exact software computation in compute_guest_tsc(). 2605 * 2606 * These values are tracked in kvm->arch.cur_xxx variables.
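 *
 * For example, vCPUs initialized back to back with the same TSC value land
 * in the same generation and reuse cur_tsc_offset, while a later unrelated
 * write starts a new generation.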
2607 */ 2608 kvm->arch.cur_tsc_generation++; 2609 kvm->arch.cur_tsc_nsec = ns; 2610 kvm->arch.cur_tsc_write = tsc; 2611 kvm->arch.cur_tsc_offset = offset; 2612 kvm->arch.nr_vcpus_matched_tsc = 0; 2613 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { 2614 kvm->arch.nr_vcpus_matched_tsc++; 2615 } 2616 2617 /* Keep track of which generation this VCPU has synchronized to */ 2618 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; 2619 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; 2620 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; 2621 2622 kvm_track_tsc_matching(vcpu); 2623 } 2624 2625 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) 2626 { 2627 struct kvm *kvm = vcpu->kvm; 2628 u64 offset, ns, elapsed; 2629 unsigned long flags; 2630 bool matched = false; 2631 bool synchronizing = false; 2632 2633 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 2634 offset = kvm_compute_l1_tsc_offset(vcpu, data); 2635 ns = get_kvmclock_base_ns(); 2636 elapsed = ns - kvm->arch.last_tsc_nsec; 2637 2638 if (vcpu->arch.virtual_tsc_khz) { 2639 if (data == 0) { 2640 /* 2641 * detection of vcpu initialization -- need to sync 2642 * with other vCPUs. This particularly helps to keep 2643 * kvm_clock stable after CPU hotplug 2644 */ 2645 synchronizing = true; 2646 } else { 2647 u64 tsc_exp = kvm->arch.last_tsc_write + 2648 nsec_to_cycles(vcpu, elapsed); 2649 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; 2650 /* 2651 * Special case: TSC write with a small delta (1 second) 2652 * of virtual cycle time against real time is 2653 * interpreted as an attempt to synchronize the CPU. 2654 */ 2655 synchronizing = data < tsc_exp + tsc_hz && 2656 data + tsc_hz > tsc_exp; 2657 } 2658 } 2659 2660 /* 2661 * For a reliable TSC, we can match TSC offsets, and for an unstable 2662 * TSC, we add elapsed time in this computation. We could let the 2663 * compensation code attempt to catch up if we fall behind, but 2664 * it's better to try to match offsets from the beginning. 
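 * (For an unstable TSC, adding the elapsed time keeps the synthesized
 * guest TSC from appearing to go backwards across host TSC discontinuities.)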
2665 */ 2666 if (synchronizing && 2667 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { 2668 if (!kvm_check_tsc_unstable()) { 2669 offset = kvm->arch.cur_tsc_offset; 2670 } else { 2671 u64 delta = nsec_to_cycles(vcpu, elapsed); 2672 data += delta; 2673 offset = kvm_compute_l1_tsc_offset(vcpu, data); 2674 } 2675 matched = true; 2676 } 2677 2678 __kvm_synchronize_tsc(vcpu, offset, data, ns, matched); 2679 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 2680 } 2681 2682 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, 2683 s64 adjustment) 2684 { 2685 u64 tsc_offset = vcpu->arch.l1_tsc_offset; 2686 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); 2687 } 2688 2689 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) 2690 { 2691 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio) 2692 WARN_ON(adjustment < 0); 2693 adjustment = kvm_scale_tsc((u64) adjustment, 2694 vcpu->arch.l1_tsc_scaling_ratio); 2695 adjust_tsc_offset_guest(vcpu, adjustment); 2696 } 2697 2698 #ifdef CONFIG_X86_64 2699 2700 static u64 read_tsc(void) 2701 { 2702 u64 ret = (u64)rdtsc_ordered(); 2703 u64 last = pvclock_gtod_data.clock.cycle_last; 2704 2705 if (likely(ret >= last)) 2706 return ret; 2707 2708 /* 2709 * GCC likes to generate cmov here, but this branch is extremely 2710 * predictable (it's just a function of time and the likely is 2711 * very likely) and there's a data dependence, so force GCC 2712 * to generate a branch instead. I don't barrier() because 2713 * we don't actually need a barrier, and if this function 2714 * ever gets inlined it will generate worse code. 2715 */ 2716 asm volatile (""); 2717 return last; 2718 } 2719 2720 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp, 2721 int *mode) 2722 { 2723 long v; 2724 u64 tsc_pg_val; 2725 2726 switch (clock->vclock_mode) { 2727 case VDSO_CLOCKMODE_HVCLOCK: 2728 tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(), 2729 tsc_timestamp); 2730 if (tsc_pg_val != U64_MAX) { 2731 /* TSC page valid */ 2732 *mode = VDSO_CLOCKMODE_HVCLOCK; 2733 v = (tsc_pg_val - clock->cycle_last) & 2734 clock->mask; 2735 } else { 2736 /* TSC page invalid */ 2737 *mode = VDSO_CLOCKMODE_NONE; 2738 } 2739 break; 2740 case VDSO_CLOCKMODE_TSC: 2741 *mode = VDSO_CLOCKMODE_TSC; 2742 *tsc_timestamp = read_tsc(); 2743 v = (*tsc_timestamp - clock->cycle_last) & 2744 clock->mask; 2745 break; 2746 default: 2747 *mode = VDSO_CLOCKMODE_NONE; 2748 } 2749 2750 if (*mode == VDSO_CLOCKMODE_NONE) 2751 *tsc_timestamp = v = 0; 2752 2753 return v * clock->mult; 2754 } 2755 2756 static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp) 2757 { 2758 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2759 unsigned long seq; 2760 int mode; 2761 u64 ns; 2762 2763 do { 2764 seq = read_seqcount_begin(&gtod->seq); 2765 ns = gtod->raw_clock.base_cycles; 2766 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode); 2767 ns >>= gtod->raw_clock.shift; 2768 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot)); 2769 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); 2770 *t = ns; 2771 2772 return mode; 2773 } 2774 2775 static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp) 2776 { 2777 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2778 unsigned long seq; 2779 int mode; 2780 u64 ns; 2781 2782 do { 2783 seq = read_seqcount_begin(&gtod->seq); 2784 ts->tv_sec = gtod->wall_time_sec; 2785 ns = gtod->clock.base_cycles; 2786 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode); 2787 ns
>>= gtod->clock.shift; 2788 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); 2789 2790 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); 2791 ts->tv_nsec = ns; 2792 2793 return mode; 2794 } 2795 2796 /* returns true if host is using TSC based clocksource */ 2797 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp) 2798 { 2799 /* checked again under seqlock below */ 2800 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) 2801 return false; 2802 2803 return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns, 2804 tsc_timestamp)); 2805 } 2806 2807 /* returns true if host is using TSC based clocksource */ 2808 static bool kvm_get_walltime_and_clockread(struct timespec64 *ts, 2809 u64 *tsc_timestamp) 2810 { 2811 /* checked again under seqlock below */ 2812 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) 2813 return false; 2814 2815 return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp)); 2816 } 2817 #endif 2818 2819 /* 2820 * 2821 * Assuming a stable TSC across physical CPUs, and a stable TSC 2822 * across virtual CPUs, the following condition is possible. 2823 * Each numbered line represents an event visible to both 2824 * CPUs at the next numbered event. 2825 * 2826 * "timespecX" represents host monotonic time. "tscX" represents 2827 * RDTSC value. 2828 * 2829 * VCPU0 on CPU0 | VCPU1 on CPU1 2830 * 2831 * 1. read timespec0,tsc0 2832 * 2. | timespec1 = timespec0 + N 2833 * | tsc1 = tsc0 + M 2834 * 3. transition to guest | transition to guest 2835 * 4. ret0 = timespec0 + (rdtsc - tsc0) | 2836 * 5. | ret1 = timespec1 + (rdtsc - tsc1) 2837 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M)) 2838 * 2839 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity: 2840 * 2841 * - ret0 < ret1 2842 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M)) 2843 * ... 2844 * - 0 < N - M => M < N 2845 * 2846 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not 2847 * always the case (the difference between two distinct xtime instances 2848 * might be smaller than the difference between corresponding TSC reads, 2849 * when updating guest vcpus pvclock areas). 2850 * 2851 * To avoid that problem, do not allow visibility of distinct 2852 * system_timestamp/tsc_timestamp values simultaneously: use a master 2853 * copy of host monotonic time values. Update that master copy 2854 * in lockstep. 2855 * 2856 * Rely on synchronization of host TSCs and guest TSCs for monotonicity. 2857 * 2858 */ 2859 2860 static void pvclock_update_vm_gtod_copy(struct kvm *kvm) 2861 { 2862 #ifdef CONFIG_X86_64 2863 struct kvm_arch *ka = &kvm->arch; 2864 int vclock_mode; 2865 bool host_tsc_clocksource, vcpus_matched; 2866 2867 lockdep_assert_held(&kvm->arch.tsc_write_lock); 2868 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 2869 atomic_read(&kvm->online_vcpus)); 2870 2871 /* 2872 * If the host uses TSC clock, then passthrough TSC as stable 2873 * to the guest.
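 * The master clock is only used when, in addition, all vCPU TSCs are
 * matched, no backwards TSC has been observed, and the boot vCPU is not
 * using the old kvmclock MSR.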
2874 */ 2875 host_tsc_clocksource = kvm_get_time_and_clockread( 2876 &ka->master_kernel_ns, 2877 &ka->master_cycle_now); 2878 2879 ka->use_master_clock = host_tsc_clocksource && vcpus_matched 2880 && !ka->backwards_tsc_observed 2881 && !ka->boot_vcpu_runs_old_kvmclock; 2882 2883 if (ka->use_master_clock) 2884 atomic_set(&kvm_guest_has_master_clock, 1); 2885 2886 vclock_mode = pvclock_gtod_data.clock.vclock_mode; 2887 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, 2888 vcpus_matched); 2889 #endif 2890 } 2891 2892 static void kvm_make_mclock_inprogress_request(struct kvm *kvm) 2893 { 2894 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); 2895 } 2896 2897 static void __kvm_start_pvclock_update(struct kvm *kvm) 2898 { 2899 raw_spin_lock_irq(&kvm->arch.tsc_write_lock); 2900 write_seqcount_begin(&kvm->arch.pvclock_sc); 2901 } 2902 2903 static void kvm_start_pvclock_update(struct kvm *kvm) 2904 { 2905 kvm_make_mclock_inprogress_request(kvm); 2906 2907 /* no guest entries from this point */ 2908 __kvm_start_pvclock_update(kvm); 2909 } 2910 2911 static void kvm_end_pvclock_update(struct kvm *kvm) 2912 { 2913 struct kvm_arch *ka = &kvm->arch; 2914 struct kvm_vcpu *vcpu; 2915 unsigned long i; 2916 2917 write_seqcount_end(&ka->pvclock_sc); 2918 raw_spin_unlock_irq(&ka->tsc_write_lock); 2919 kvm_for_each_vcpu(i, vcpu, kvm) 2920 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2921 2922 /* guest entries allowed */ 2923 kvm_for_each_vcpu(i, vcpu, kvm) 2924 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); 2925 } 2926 2927 static void kvm_update_masterclock(struct kvm *kvm) 2928 { 2929 kvm_hv_request_tsc_page_update(kvm); 2930 kvm_start_pvclock_update(kvm); 2931 pvclock_update_vm_gtod_copy(kvm); 2932 kvm_end_pvclock_update(kvm); 2933 } 2934 2935 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. 
*/ 2936 static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 2937 { 2938 struct kvm_arch *ka = &kvm->arch; 2939 struct pvclock_vcpu_time_info hv_clock; 2940 2941 /* both __this_cpu_read() and rdtsc() should be on the same cpu */ 2942 get_cpu(); 2943 2944 data->flags = 0; 2945 if (ka->use_master_clock && __this_cpu_read(cpu_tsc_khz)) { 2946 #ifdef CONFIG_X86_64 2947 struct timespec64 ts; 2948 2949 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) { 2950 data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec; 2951 data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC; 2952 } else 2953 #endif 2954 data->host_tsc = rdtsc(); 2955 2956 data->flags |= KVM_CLOCK_TSC_STABLE; 2957 hv_clock.tsc_timestamp = ka->master_cycle_now; 2958 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 2959 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, 2960 &hv_clock.tsc_shift, 2961 &hv_clock.tsc_to_system_mul); 2962 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc); 2963 } else { 2964 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; 2965 } 2966 2967 put_cpu(); 2968 } 2969 2970 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 2971 { 2972 struct kvm_arch *ka = &kvm->arch; 2973 unsigned seq; 2974 2975 do { 2976 seq = read_seqcount_begin(&ka->pvclock_sc); 2977 __get_kvmclock(kvm, data); 2978 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 2979 } 2980 2981 u64 get_kvmclock_ns(struct kvm *kvm) 2982 { 2983 struct kvm_clock_data data; 2984 2985 get_kvmclock(kvm, &data); 2986 return data.clock; 2987 } 2988 2989 static void kvm_setup_guest_pvclock(struct kvm_vcpu *v, 2990 struct gfn_to_pfn_cache *gpc, 2991 unsigned int offset) 2992 { 2993 struct kvm_vcpu_arch *vcpu = &v->arch; 2994 struct pvclock_vcpu_time_info *guest_hv_clock; 2995 unsigned long flags; 2996 2997 read_lock_irqsave(&gpc->lock, flags); 2998 while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, 2999 offset + sizeof(*guest_hv_clock))) { 3000 read_unlock_irqrestore(&gpc->lock, flags); 3001 3002 if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, 3003 offset + sizeof(*guest_hv_clock))) 3004 return; 3005 3006 read_lock_irqsave(&gpc->lock, flags); 3007 } 3008 3009 guest_hv_clock = (void *)(gpc->khva + offset); 3010 3011 /* 3012 * This VCPU is paused, but it's legal for a guest to read another 3013 * VCPU's kvmclock, so we really have to follow the specification where 3014 * it says that version is odd if data is being modified, and even after 3015 * it is consistent. 
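 *
 * Concretely, the update below makes the version odd, publishes the new
 * time data, and then makes the version even again.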
3016 */ 3017 3018 guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1; 3019 smp_wmb(); 3020 3021 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 3022 vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); 3023 3024 if (vcpu->pvclock_set_guest_stopped_request) { 3025 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; 3026 vcpu->pvclock_set_guest_stopped_request = false; 3027 } 3028 3029 memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock)); 3030 smp_wmb(); 3031 3032 guest_hv_clock->version = ++vcpu->hv_clock.version; 3033 3034 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); 3035 read_unlock_irqrestore(&gpc->lock, flags); 3036 3037 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); 3038 } 3039 3040 static int kvm_guest_time_update(struct kvm_vcpu *v) 3041 { 3042 unsigned long flags, tgt_tsc_khz; 3043 unsigned seq; 3044 struct kvm_vcpu_arch *vcpu = &v->arch; 3045 struct kvm_arch *ka = &v->kvm->arch; 3046 s64 kernel_ns; 3047 u64 tsc_timestamp, host_tsc; 3048 u8 pvclock_flags; 3049 bool use_master_clock; 3050 3051 kernel_ns = 0; 3052 host_tsc = 0; 3053 3054 /* 3055 * If the host uses TSC clock, then passthrough TSC as stable 3056 * to the guest. 3057 */ 3058 do { 3059 seq = read_seqcount_begin(&ka->pvclock_sc); 3060 use_master_clock = ka->use_master_clock; 3061 if (use_master_clock) { 3062 host_tsc = ka->master_cycle_now; 3063 kernel_ns = ka->master_kernel_ns; 3064 } 3065 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 3066 3067 /* Keep irq disabled to prevent changes to the clock */ 3068 local_irq_save(flags); 3069 tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz); 3070 if (unlikely(tgt_tsc_khz == 0)) { 3071 local_irq_restore(flags); 3072 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3073 return 1; 3074 } 3075 if (!use_master_clock) { 3076 host_tsc = rdtsc(); 3077 kernel_ns = get_kvmclock_base_ns(); 3078 } 3079 3080 tsc_timestamp = kvm_read_l1_tsc(v, host_tsc); 3081 3082 /* 3083 * We may have to catch up the TSC to match elapsed wall clock 3084 * time for two reasons, even if kvmclock is used. 3085 * 1) CPU could have been running below the maximum TSC rate 3086 * 2) Broken TSC compensation resets the base at each VCPU 3087 * entry to avoid unknown leaps of TSC even when running 3088 * again on the same CPU. This may cause apparent elapsed 3089 * time to disappear, and the guest to stand still or run 3090 * very slowly. 
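 * The tsc_catchup handling below bumps the TSC offset whenever the guest
 * TSC expected from elapsed time is ahead of the value derived from the
 * host TSC.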
3091 */ 3092 if (vcpu->tsc_catchup) { 3093 u64 tsc = compute_guest_tsc(v, kernel_ns); 3094 if (tsc > tsc_timestamp) { 3095 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); 3096 tsc_timestamp = tsc; 3097 } 3098 } 3099 3100 local_irq_restore(flags); 3101 3102 /* With all the info we got, fill in the values */ 3103 3104 if (kvm_caps.has_tsc_control) 3105 tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz, 3106 v->arch.l1_tsc_scaling_ratio); 3107 3108 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { 3109 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, 3110 &vcpu->hv_clock.tsc_shift, 3111 &vcpu->hv_clock.tsc_to_system_mul); 3112 vcpu->hw_tsc_khz = tgt_tsc_khz; 3113 } 3114 3115 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; 3116 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; 3117 vcpu->last_guest_tsc = tsc_timestamp; 3118 3119 /* If the host uses TSC clocksource, then it is stable */ 3120 pvclock_flags = 0; 3121 if (use_master_clock) 3122 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; 3123 3124 vcpu->hv_clock.flags = pvclock_flags; 3125 3126 if (vcpu->pv_time.active) 3127 kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0); 3128 if (vcpu->xen.vcpu_info_cache.active) 3129 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache, 3130 offsetof(struct compat_vcpu_info, time)); 3131 if (vcpu->xen.vcpu_time_info_cache.active) 3132 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0); 3133 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); 3134 return 0; 3135 } 3136 3137 /* 3138 * kvmclock updates which are isolated to a given vcpu, such as 3139 * vcpu->cpu migration, should not allow system_timestamp from 3140 * the rest of the vcpus to remain static. Otherwise ntp frequency 3141 * correction applies to one vcpu's system_timestamp but not 3142 * the others. 3143 * 3144 * So in those cases, request a kvmclock update for all vcpus. 3145 * We need to rate-limit these requests though, as they can 3146 * considerably slow guests that have a large number of vcpus. 3147 * The time for a remote vcpu to update its kvmclock is bound 3148 * by the delay we use to rate-limit the updates. 3149 */ 3150 3151 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) 3152 3153 static void kvmclock_update_fn(struct work_struct *work) 3154 { 3155 unsigned long i; 3156 struct delayed_work *dwork = to_delayed_work(work); 3157 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3158 kvmclock_update_work); 3159 struct kvm *kvm = container_of(ka, struct kvm, arch); 3160 struct kvm_vcpu *vcpu; 3161 3162 kvm_for_each_vcpu(i, vcpu, kvm) { 3163 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3164 kvm_vcpu_kick(vcpu); 3165 } 3166 } 3167 3168 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) 3169 { 3170 struct kvm *kvm = v->kvm; 3171 3172 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3173 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 3174 KVMCLOCK_UPDATE_DELAY); 3175 } 3176 3177 #define KVMCLOCK_SYNC_PERIOD (300 * HZ) 3178 3179 static void kvmclock_sync_fn(struct work_struct *work) 3180 { 3181 struct delayed_work *dwork = to_delayed_work(work); 3182 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3183 kvmclock_sync_work); 3184 struct kvm *kvm = container_of(ka, struct kvm, arch); 3185 3186 if (!kvmclock_periodic_sync) 3187 return; 3188 3189 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); 3190 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 3191 KVMCLOCK_SYNC_PERIOD); 3192 } 3193 3194 /* 3195 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP. 
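 * McStatusWrEn is bit 18 of MSR_K7_HWCR; see can_set_mci_status() below.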
3196 */ 3197 static bool can_set_mci_status(struct kvm_vcpu *vcpu) 3198 { 3199 /* McStatusWrEn enabled? */ 3200 if (guest_cpuid_is_amd_or_hygon(vcpu)) 3201 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); 3202 3203 return false; 3204 } 3205 3206 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3207 { 3208 u64 mcg_cap = vcpu->arch.mcg_cap; 3209 unsigned bank_num = mcg_cap & 0xff; 3210 u32 msr = msr_info->index; 3211 u64 data = msr_info->data; 3212 3213 switch (msr) { 3214 case MSR_IA32_MCG_STATUS: 3215 vcpu->arch.mcg_status = data; 3216 break; 3217 case MSR_IA32_MCG_CTL: 3218 if (!(mcg_cap & MCG_CTL_P) && 3219 (data || !msr_info->host_initiated)) 3220 return 1; 3221 if (data != 0 && data != ~(u64)0) 3222 return 1; 3223 vcpu->arch.mcg_ctl = data; 3224 break; 3225 default: 3226 if (msr >= MSR_IA32_MC0_CTL && 3227 msr < MSR_IA32_MCx_CTL(bank_num)) { 3228 u32 offset = array_index_nospec( 3229 msr - MSR_IA32_MC0_CTL, 3230 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); 3231 3232 /* only 0 or all 1s can be written to IA32_MCi_CTL 3233 * some Linux kernels though clear bit 10 in bank 4 to 3234 * workaround a BIOS/GART TBL issue on AMD K8s, ignore 3235 * this to avoid an uncatched #GP in the guest. 3236 * 3237 * UNIXWARE clears bit 0 of MC1_CTL to ignore 3238 * correctable, single-bit ECC data errors. 3239 */ 3240 if ((offset & 0x3) == 0 && 3241 data != 0 && (data | (1 << 10) | 1) != ~(u64)0) 3242 return 1; 3243 3244 /* MCi_STATUS */ 3245 if (!msr_info->host_initiated && 3246 (offset & 0x3) == 1 && data != 0) { 3247 if (!can_set_mci_status(vcpu)) 3248 return 1; 3249 } 3250 3251 vcpu->arch.mce_banks[offset] = data; 3252 break; 3253 } 3254 return 1; 3255 } 3256 return 0; 3257 } 3258 3259 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) 3260 { 3261 u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT; 3262 3263 return (vcpu->arch.apf.msr_en_val & mask) == mask; 3264 } 3265 3266 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) 3267 { 3268 gpa_t gpa = data & ~0x3f; 3269 3270 /* Bits 4:5 are reserved, Should be zero */ 3271 if (data & 0x30) 3272 return 1; 3273 3274 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) && 3275 (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT)) 3276 return 1; 3277 3278 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) && 3279 (data & KVM_ASYNC_PF_DELIVERY_AS_INT)) 3280 return 1; 3281 3282 if (!lapic_in_kernel(vcpu)) 3283 return data ? 
1 : 0; 3284 3285 vcpu->arch.apf.msr_en_val = data; 3286 3287 if (!kvm_pv_async_pf_enabled(vcpu)) { 3288 kvm_clear_async_pf_completion_queue(vcpu); 3289 kvm_async_pf_hash_reset(vcpu); 3290 return 0; 3291 } 3292 3293 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, 3294 sizeof(u64))) 3295 return 1; 3296 3297 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); 3298 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; 3299 3300 kvm_async_pf_wakeup_all(vcpu); 3301 3302 return 0; 3303 } 3304 3305 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data) 3306 { 3307 /* Bits 8-63 are reserved */ 3308 if (data >> 8) 3309 return 1; 3310 3311 if (!lapic_in_kernel(vcpu)) 3312 return 1; 3313 3314 vcpu->arch.apf.msr_int_val = data; 3315 3316 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; 3317 3318 return 0; 3319 } 3320 3321 static void kvmclock_reset(struct kvm_vcpu *vcpu) 3322 { 3323 kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time); 3324 vcpu->arch.time = 0; 3325 } 3326 3327 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) 3328 { 3329 ++vcpu->stat.tlb_flush; 3330 static_call(kvm_x86_flush_tlb_all)(vcpu); 3331 } 3332 3333 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) 3334 { 3335 ++vcpu->stat.tlb_flush; 3336 3337 if (!tdp_enabled) { 3338 /* 3339 * A TLB flush on behalf of the guest is equivalent to 3340 * INVPCID(all), toggling CR4.PGE, etc., which requires 3341 * a forced sync of the shadow page tables. Ensure all the 3342 * roots are synced and the guest TLB in hardware is clean. 3343 */ 3344 kvm_mmu_sync_roots(vcpu); 3345 kvm_mmu_sync_prev_roots(vcpu); 3346 } 3347 3348 static_call(kvm_x86_flush_tlb_guest)(vcpu); 3349 } 3350 3351 3352 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) 3353 { 3354 ++vcpu->stat.tlb_flush; 3355 static_call(kvm_x86_flush_tlb_current)(vcpu); 3356 } 3357 3358 /* 3359 * Service "local" TLB flush requests, which are specific to the current MMU 3360 * context. In addition to the generic event handling in vcpu_enter_guest(), 3361 * TLB flushes that are targeted at an MMU context also need to be serviced 3362 * prior before nested VM-Enter/VM-Exit. 3363 */ 3364 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu) 3365 { 3366 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 3367 kvm_vcpu_flush_tlb_current(vcpu); 3368 3369 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) 3370 kvm_vcpu_flush_tlb_guest(vcpu); 3371 } 3372 EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests); 3373 3374 static void record_steal_time(struct kvm_vcpu *vcpu) 3375 { 3376 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 3377 struct kvm_steal_time __user *st; 3378 struct kvm_memslots *slots; 3379 u64 steal; 3380 u32 version; 3381 3382 if (kvm_xen_msr_enabled(vcpu->kvm)) { 3383 kvm_xen_runstate_set_running(vcpu); 3384 return; 3385 } 3386 3387 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 3388 return; 3389 3390 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) 3391 return; 3392 3393 slots = kvm_memslots(vcpu->kvm); 3394 3395 if (unlikely(slots->generation != ghc->generation || 3396 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) { 3397 gfn_t gfn = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; 3398 3399 /* We rely on the fact that it fits in a single page. 
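 * The BUILD_BUG_ON below checks that sizeof(*st) does not exceed the
 * alignment implied by KVM_STEAL_VALID_BITS.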
*/ 3400 BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS); 3401 3402 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gfn, sizeof(*st)) || 3403 kvm_is_error_hva(ghc->hva) || !ghc->memslot) 3404 return; 3405 } 3406 3407 st = (struct kvm_steal_time __user *)ghc->hva; 3408 /* 3409 * Doing a TLB flush here, on the guest's behalf, can avoid 3410 * expensive IPIs. 3411 */ 3412 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { 3413 u8 st_preempted = 0; 3414 int err = -EFAULT; 3415 3416 if (!user_access_begin(st, sizeof(*st))) 3417 return; 3418 3419 asm volatile("1: xchgb %0, %2\n" 3420 "xor %1, %1\n" 3421 "2:\n" 3422 _ASM_EXTABLE_UA(1b, 2b) 3423 : "+q" (st_preempted), 3424 "+&r" (err), 3425 "+m" (st->preempted)); 3426 if (err) 3427 goto out; 3428 3429 user_access_end(); 3430 3431 vcpu->arch.st.preempted = 0; 3432 3433 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, 3434 st_preempted & KVM_VCPU_FLUSH_TLB); 3435 if (st_preempted & KVM_VCPU_FLUSH_TLB) 3436 kvm_vcpu_flush_tlb_guest(vcpu); 3437 3438 if (!user_access_begin(st, sizeof(*st))) 3439 goto dirty; 3440 } else { 3441 if (!user_access_begin(st, sizeof(*st))) 3442 return; 3443 3444 unsafe_put_user(0, &st->preempted, out); 3445 vcpu->arch.st.preempted = 0; 3446 } 3447 3448 unsafe_get_user(version, &st->version, out); 3449 if (version & 1) 3450 version += 1; /* first time write, random junk */ 3451 3452 version += 1; 3453 unsafe_put_user(version, &st->version, out); 3454 3455 smp_wmb(); 3456 3457 unsafe_get_user(steal, &st->steal, out); 3458 steal += current->sched_info.run_delay - 3459 vcpu->arch.st.last_steal; 3460 vcpu->arch.st.last_steal = current->sched_info.run_delay; 3461 unsafe_put_user(steal, &st->steal, out); 3462 3463 version += 1; 3464 unsafe_put_user(version, &st->version, out); 3465 3466 out: 3467 user_access_end(); 3468 dirty: 3469 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 3470 } 3471 3472 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3473 { 3474 bool pr = false; 3475 u32 msr = msr_info->index; 3476 u64 data = msr_info->data; 3477 3478 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) 3479 return kvm_xen_write_hypercall_page(vcpu, data); 3480 3481 switch (msr) { 3482 case MSR_AMD64_NB_CFG: 3483 case MSR_IA32_UCODE_WRITE: 3484 case MSR_VM_HSAVE_PA: 3485 case MSR_AMD64_PATCH_LOADER: 3486 case MSR_AMD64_BU_CFG2: 3487 case MSR_AMD64_DC_CFG: 3488 case MSR_F15H_EX_CFG: 3489 break; 3490 3491 case MSR_IA32_UCODE_REV: 3492 if (msr_info->host_initiated) 3493 vcpu->arch.microcode_version = data; 3494 break; 3495 case MSR_IA32_ARCH_CAPABILITIES: 3496 if (!msr_info->host_initiated) 3497 return 1; 3498 vcpu->arch.arch_capabilities = data; 3499 break; 3500 case MSR_IA32_PERF_CAPABILITIES: { 3501 struct kvm_msr_entry msr_ent = {.index = msr, .data = 0}; 3502 3503 if (!msr_info->host_initiated) 3504 return 1; 3505 if (kvm_get_msr_feature(&msr_ent)) 3506 return 1; 3507 if (data & ~msr_ent.data) 3508 return 1; 3509 3510 vcpu->arch.perf_capabilities = data; 3511 3512 return 0; 3513 } 3514 case MSR_EFER: 3515 return set_efer(vcpu, msr_info); 3516 case MSR_K7_HWCR: 3517 data &= ~(u64)0x40; /* ignore flush filter disable */ 3518 data &= ~(u64)0x100; /* ignore ignne emulation enable */ 3519 data &= ~(u64)0x8; /* ignore TLB cache disable */ 3520 3521 /* Handle McStatusWrEn */ 3522 if (data == BIT_ULL(18)) { 3523 vcpu->arch.msr_hwcr = data; 3524 } else if (data != 0) { 3525 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", 3526 data); 3527 return 1; 3528 } 3529 break; 3530 case 
MSR_FAM10H_MMIO_CONF_BASE: 3531 if (data != 0) { 3532 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " 3533 "0x%llx\n", data); 3534 return 1; 3535 } 3536 break; 3537 case 0x200 ... 0x2ff: 3538 return kvm_mtrr_set_msr(vcpu, msr, data); 3539 case MSR_IA32_APICBASE: 3540 return kvm_set_apic_base(vcpu, msr_info); 3541 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: 3542 return kvm_x2apic_msr_write(vcpu, msr, data); 3543 case MSR_IA32_TSC_DEADLINE: 3544 kvm_set_lapic_tscdeadline_msr(vcpu, data); 3545 break; 3546 case MSR_IA32_TSC_ADJUST: 3547 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) { 3548 if (!msr_info->host_initiated) { 3549 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; 3550 adjust_tsc_offset_guest(vcpu, adj); 3551 /* Before back to guest, tsc_timestamp must be adjusted 3552 * as well, otherwise guest's percpu pvclock time could jump. 3553 */ 3554 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3555 } 3556 vcpu->arch.ia32_tsc_adjust_msr = data; 3557 } 3558 break; 3559 case MSR_IA32_MISC_ENABLE: { 3560 u64 old_val = vcpu->arch.ia32_misc_enable_msr; 3561 3562 if (!msr_info->host_initiated) { 3563 /* RO bits */ 3564 if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK) 3565 return 1; 3566 3567 /* R bits, i.e. writes are ignored, but don't fault. */ 3568 data = data & ~MSR_IA32_MISC_ENABLE_EMON; 3569 data |= old_val & MSR_IA32_MISC_ENABLE_EMON; 3570 } 3571 3572 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && 3573 ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { 3574 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3)) 3575 return 1; 3576 vcpu->arch.ia32_misc_enable_msr = data; 3577 kvm_update_cpuid_runtime(vcpu); 3578 } else { 3579 vcpu->arch.ia32_misc_enable_msr = data; 3580 } 3581 break; 3582 } 3583 case MSR_IA32_SMBASE: 3584 if (!msr_info->host_initiated) 3585 return 1; 3586 vcpu->arch.smbase = data; 3587 break; 3588 case MSR_IA32_POWER_CTL: 3589 vcpu->arch.msr_ia32_power_ctl = data; 3590 break; 3591 case MSR_IA32_TSC: 3592 if (msr_info->host_initiated) { 3593 kvm_synchronize_tsc(vcpu, data); 3594 } else { 3595 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; 3596 adjust_tsc_offset_guest(vcpu, adj); 3597 vcpu->arch.ia32_tsc_adjust_msr += adj; 3598 } 3599 break; 3600 case MSR_IA32_XSS: 3601 if (!msr_info->host_initiated && 3602 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 3603 return 1; 3604 /* 3605 * KVM supports exposing PT to the guest, but does not support 3606 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than 3607 * XSAVES/XRSTORS to save/restore PT MSRs. 
3608 */ 3609 if (data & ~kvm_caps.supported_xss) 3610 return 1; 3611 vcpu->arch.ia32_xss = data; 3612 kvm_update_cpuid_runtime(vcpu); 3613 break; 3614 case MSR_SMI_COUNT: 3615 if (!msr_info->host_initiated) 3616 return 1; 3617 vcpu->arch.smi_count = data; 3618 break; 3619 case MSR_KVM_WALL_CLOCK_NEW: 3620 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3621 return 1; 3622 3623 vcpu->kvm->arch.wall_clock = data; 3624 kvm_write_wall_clock(vcpu->kvm, data, 0); 3625 break; 3626 case MSR_KVM_WALL_CLOCK: 3627 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3628 return 1; 3629 3630 vcpu->kvm->arch.wall_clock = data; 3631 kvm_write_wall_clock(vcpu->kvm, data, 0); 3632 break; 3633 case MSR_KVM_SYSTEM_TIME_NEW: 3634 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3635 return 1; 3636 3637 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); 3638 break; 3639 case MSR_KVM_SYSTEM_TIME: 3640 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3641 return 1; 3642 3643 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); 3644 break; 3645 case MSR_KVM_ASYNC_PF_EN: 3646 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 3647 return 1; 3648 3649 if (kvm_pv_enable_async_pf(vcpu, data)) 3650 return 1; 3651 break; 3652 case MSR_KVM_ASYNC_PF_INT: 3653 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3654 return 1; 3655 3656 if (kvm_pv_enable_async_pf_int(vcpu, data)) 3657 return 1; 3658 break; 3659 case MSR_KVM_ASYNC_PF_ACK: 3660 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3661 return 1; 3662 if (data & 0x1) { 3663 vcpu->arch.apf.pageready_pending = false; 3664 kvm_check_async_pf_completion(vcpu); 3665 } 3666 break; 3667 case MSR_KVM_STEAL_TIME: 3668 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 3669 return 1; 3670 3671 if (unlikely(!sched_info_on())) 3672 return 1; 3673 3674 if (data & KVM_STEAL_RESERVED_MASK) 3675 return 1; 3676 3677 vcpu->arch.st.msr_val = data; 3678 3679 if (!(data & KVM_MSR_ENABLED)) 3680 break; 3681 3682 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 3683 3684 break; 3685 case MSR_KVM_PV_EOI_EN: 3686 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 3687 return 1; 3688 3689 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8))) 3690 return 1; 3691 break; 3692 3693 case MSR_KVM_POLL_CONTROL: 3694 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 3695 return 1; 3696 3697 /* only enable bit supported */ 3698 if (data & (-1ULL << 1)) 3699 return 1; 3700 3701 vcpu->arch.msr_kvm_poll_control = data; 3702 break; 3703 3704 case MSR_IA32_MCG_CTL: 3705 case MSR_IA32_MCG_STATUS: 3706 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3707 return set_msr_mce(vcpu, msr_info); 3708 3709 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3710 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3711 pr = true; 3712 fallthrough; 3713 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3714 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3715 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3716 return kvm_pmu_set_msr(vcpu, msr_info); 3717 3718 if (pr || data != 0) 3719 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " 3720 "0x%x data 0x%llx\n", msr, data); 3721 break; 3722 case MSR_K7_CLK_CTL: 3723 /* 3724 * Ignore all writes to this no longer documented MSR. 3725 * Writes are only relevant for old K7 processors, 3726 * all pre-dating SVM, but a recommended workaround from 3727 * AMD for these chips. It is possible to specify the 3728 * affected processor models on the command line, hence 3729 * the need to ignore the workaround. 3730 */ 3731 break; 3732 case HV_X64_MSR_GUEST_OS_ID ... 
HV_X64_MSR_SINT15: 3733 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 3734 case HV_X64_MSR_SYNDBG_OPTIONS: 3735 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 3736 case HV_X64_MSR_CRASH_CTL: 3737 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 3738 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 3739 case HV_X64_MSR_TSC_EMULATION_CONTROL: 3740 case HV_X64_MSR_TSC_EMULATION_STATUS: 3741 return kvm_hv_set_msr_common(vcpu, msr, data, 3742 msr_info->host_initiated); 3743 case MSR_IA32_BBL_CR_CTL3: 3744 /* Drop writes to this legacy MSR -- see rdmsr 3745 * counterpart for further detail. 3746 */ 3747 if (report_ignored_msrs) 3748 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", 3749 msr, data); 3750 break; 3751 case MSR_AMD64_OSVW_ID_LENGTH: 3752 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3753 return 1; 3754 vcpu->arch.osvw.length = data; 3755 break; 3756 case MSR_AMD64_OSVW_STATUS: 3757 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3758 return 1; 3759 vcpu->arch.osvw.status = data; 3760 break; 3761 case MSR_PLATFORM_INFO: 3762 if (!msr_info->host_initiated || 3763 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) && 3764 cpuid_fault_enabled(vcpu))) 3765 return 1; 3766 vcpu->arch.msr_platform_info = data; 3767 break; 3768 case MSR_MISC_FEATURES_ENABLES: 3769 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT || 3770 (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT && 3771 !supports_cpuid_fault(vcpu))) 3772 return 1; 3773 vcpu->arch.msr_misc_features_enables = data; 3774 break; 3775 #ifdef CONFIG_X86_64 3776 case MSR_IA32_XFD: 3777 if (!msr_info->host_initiated && 3778 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 3779 return 1; 3780 3781 if (data & ~kvm_guest_supported_xfd(vcpu)) 3782 return 1; 3783 3784 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); 3785 break; 3786 case MSR_IA32_XFD_ERR: 3787 if (!msr_info->host_initiated && 3788 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 3789 return 1; 3790 3791 if (data & ~kvm_guest_supported_xfd(vcpu)) 3792 return 1; 3793 3794 vcpu->arch.guest_fpu.xfd_err = data; 3795 break; 3796 #endif 3797 case MSR_IA32_PEBS_ENABLE: 3798 case MSR_IA32_DS_AREA: 3799 case MSR_PEBS_DATA_CFG: 3800 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: 3801 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3802 return kvm_pmu_set_msr(vcpu, msr_info); 3803 /* 3804 * Userspace is allowed to write '0' to MSRs that KVM reports 3805 * as to-be-saved, even if an MSR isn't fully supported.
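* In practice a guest write always fails here, and a host-initiated write succeeds only when the value being written is zero.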
3806 */ 3807 return !msr_info->host_initiated || data; 3808 default: 3809 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3810 return kvm_pmu_set_msr(vcpu, msr_info); 3811 return KVM_MSR_RET_INVALID; 3812 } 3813 return 0; 3814 } 3815 EXPORT_SYMBOL_GPL(kvm_set_msr_common); 3816 3817 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) 3818 { 3819 u64 data; 3820 u64 mcg_cap = vcpu->arch.mcg_cap; 3821 unsigned bank_num = mcg_cap & 0xff; 3822 3823 switch (msr) { 3824 case MSR_IA32_P5_MC_ADDR: 3825 case MSR_IA32_P5_MC_TYPE: 3826 data = 0; 3827 break; 3828 case MSR_IA32_MCG_CAP: 3829 data = vcpu->arch.mcg_cap; 3830 break; 3831 case MSR_IA32_MCG_CTL: 3832 if (!(mcg_cap & MCG_CTL_P) && !host) 3833 return 1; 3834 data = vcpu->arch.mcg_ctl; 3835 break; 3836 case MSR_IA32_MCG_STATUS: 3837 data = vcpu->arch.mcg_status; 3838 break; 3839 default: 3840 if (msr >= MSR_IA32_MC0_CTL && 3841 msr < MSR_IA32_MCx_CTL(bank_num)) { 3842 u32 offset = array_index_nospec( 3843 msr - MSR_IA32_MC0_CTL, 3844 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); 3845 3846 data = vcpu->arch.mce_banks[offset]; 3847 break; 3848 } 3849 return 1; 3850 } 3851 *pdata = data; 3852 return 0; 3853 } 3854 3855 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3856 { 3857 switch (msr_info->index) { 3858 case MSR_IA32_PLATFORM_ID: 3859 case MSR_IA32_EBL_CR_POWERON: 3860 case MSR_IA32_LASTBRANCHFROMIP: 3861 case MSR_IA32_LASTBRANCHTOIP: 3862 case MSR_IA32_LASTINTFROMIP: 3863 case MSR_IA32_LASTINTTOIP: 3864 case MSR_AMD64_SYSCFG: 3865 case MSR_K8_TSEG_ADDR: 3866 case MSR_K8_TSEG_MASK: 3867 case MSR_VM_HSAVE_PA: 3868 case MSR_K8_INT_PENDING_MSG: 3869 case MSR_AMD64_NB_CFG: 3870 case MSR_FAM10H_MMIO_CONF_BASE: 3871 case MSR_AMD64_BU_CFG2: 3872 case MSR_IA32_PERF_CTL: 3873 case MSR_AMD64_DC_CFG: 3874 case MSR_F15H_EX_CFG: 3875 /* 3876 * Intel Sandy Bridge CPUs must support the RAPL (running average power 3877 * limit) MSRs. Just return 0, as we do not want to expose the host 3878 * data here. Do not conditionalize this on CPUID, as KVM does not do 3879 * so for existing CPU-specific MSRs. 3880 */ 3881 case MSR_RAPL_POWER_UNIT: 3882 case MSR_PP0_ENERGY_STATUS: /* Power plane 0 (core) */ 3883 case MSR_PP1_ENERGY_STATUS: /* Power plane 1 (graphics uncore) */ 3884 case MSR_PKG_ENERGY_STATUS: /* Total package */ 3885 case MSR_DRAM_ENERGY_STATUS: /* DRAM controller */ 3886 msr_info->data = 0; 3887 break; 3888 case MSR_IA32_PEBS_ENABLE: 3889 case MSR_IA32_DS_AREA: 3890 case MSR_PEBS_DATA_CFG: 3891 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: 3892 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3893 return kvm_pmu_get_msr(vcpu, msr_info); 3894 /* 3895 * Userspace is allowed to read MSRs that KVM reports as 3896 * to-be-saved, even if an MSR isn't fully supported. 3897 */ 3898 if (!msr_info->host_initiated) 3899 return 1; 3900 msr_info->data = 0; 3901 break; 3902 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3903 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3904 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3905 case MSR_P6_EVNTSEL0 ... 
MSR_P6_EVNTSEL1: 3906 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3907 return kvm_pmu_get_msr(vcpu, msr_info); 3908 msr_info->data = 0; 3909 break; 3910 case MSR_IA32_UCODE_REV: 3911 msr_info->data = vcpu->arch.microcode_version; 3912 break; 3913 case MSR_IA32_ARCH_CAPABILITIES: 3914 if (!msr_info->host_initiated && 3915 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) 3916 return 1; 3917 msr_info->data = vcpu->arch.arch_capabilities; 3918 break; 3919 case MSR_IA32_PERF_CAPABILITIES: 3920 if (!msr_info->host_initiated && 3921 !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) 3922 return 1; 3923 msr_info->data = vcpu->arch.perf_capabilities; 3924 break; 3925 case MSR_IA32_POWER_CTL: 3926 msr_info->data = vcpu->arch.msr_ia32_power_ctl; 3927 break; 3928 case MSR_IA32_TSC: { 3929 /* 3930 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset 3931 * even when not intercepted. AMD manual doesn't explicitly 3932 * state this but appears to behave the same. 3933 * 3934 * On userspace reads and writes, however, we unconditionally 3935 * return L1's TSC value to ensure backwards-compatible 3936 * behavior for migration. 3937 */ 3938 u64 offset, ratio; 3939 3940 if (msr_info->host_initiated) { 3941 offset = vcpu->arch.l1_tsc_offset; 3942 ratio = vcpu->arch.l1_tsc_scaling_ratio; 3943 } else { 3944 offset = vcpu->arch.tsc_offset; 3945 ratio = vcpu->arch.tsc_scaling_ratio; 3946 } 3947 3948 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset; 3949 break; 3950 } 3951 case MSR_MTRRcap: 3952 case 0x200 ... 0x2ff: 3953 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); 3954 case 0xcd: /* fsb frequency */ 3955 msr_info->data = 3; 3956 break; 3957 /* 3958 * MSR_EBC_FREQUENCY_ID 3959 * Conservative value valid for even the basic CPU models. 3960 * Models 0,1: 000 in bits 23:21 indicating a bus speed of 3961 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, 3962 * and 266MHz for model 3, or 4. Set Core Clock 3963 * Frequency to System Bus Frequency Ratio to 1 (bits 3964 * 31:24) even though these are only valid for CPU 3965 * models > 2, however guests may end up dividing or 3966 * multiplying by zero otherwise. 3967 */ 3968 case MSR_EBC_FREQUENCY_ID: 3969 msr_info->data = 1 << 24; 3970 break; 3971 case MSR_IA32_APICBASE: 3972 msr_info->data = kvm_get_apic_base(vcpu); 3973 break; 3974 case APIC_BASE_MSR ... 
APIC_BASE_MSR + 0xff: 3975 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); 3976 case MSR_IA32_TSC_DEADLINE: 3977 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); 3978 break; 3979 case MSR_IA32_TSC_ADJUST: 3980 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; 3981 break; 3982 case MSR_IA32_MISC_ENABLE: 3983 msr_info->data = vcpu->arch.ia32_misc_enable_msr; 3984 break; 3985 case MSR_IA32_SMBASE: 3986 if (!msr_info->host_initiated) 3987 return 1; 3988 msr_info->data = vcpu->arch.smbase; 3989 break; 3990 case MSR_SMI_COUNT: 3991 msr_info->data = vcpu->arch.smi_count; 3992 break; 3993 case MSR_IA32_PERF_STATUS: 3994 /* TSC increment by tick */ 3995 msr_info->data = 1000ULL; 3996 /* CPU multiplier */ 3997 msr_info->data |= (((uint64_t)4ULL) << 40); 3998 break; 3999 case MSR_EFER: 4000 msr_info->data = vcpu->arch.efer; 4001 break; 4002 case MSR_KVM_WALL_CLOCK: 4003 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4004 return 1; 4005 4006 msr_info->data = vcpu->kvm->arch.wall_clock; 4007 break; 4008 case MSR_KVM_WALL_CLOCK_NEW: 4009 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4010 return 1; 4011 4012 msr_info->data = vcpu->kvm->arch.wall_clock; 4013 break; 4014 case MSR_KVM_SYSTEM_TIME: 4015 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4016 return 1; 4017 4018 msr_info->data = vcpu->arch.time; 4019 break; 4020 case MSR_KVM_SYSTEM_TIME_NEW: 4021 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4022 return 1; 4023 4024 msr_info->data = vcpu->arch.time; 4025 break; 4026 case MSR_KVM_ASYNC_PF_EN: 4027 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 4028 return 1; 4029 4030 msr_info->data = vcpu->arch.apf.msr_en_val; 4031 break; 4032 case MSR_KVM_ASYNC_PF_INT: 4033 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4034 return 1; 4035 4036 msr_info->data = vcpu->arch.apf.msr_int_val; 4037 break; 4038 case MSR_KVM_ASYNC_PF_ACK: 4039 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4040 return 1; 4041 4042 msr_info->data = 0; 4043 break; 4044 case MSR_KVM_STEAL_TIME: 4045 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 4046 return 1; 4047 4048 msr_info->data = vcpu->arch.st.msr_val; 4049 break; 4050 case MSR_KVM_PV_EOI_EN: 4051 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 4052 return 1; 4053 4054 msr_info->data = vcpu->arch.pv_eoi.msr_val; 4055 break; 4056 case MSR_KVM_POLL_CONTROL: 4057 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 4058 return 1; 4059 4060 msr_info->data = vcpu->arch.msr_kvm_poll_control; 4061 break; 4062 case MSR_IA32_P5_MC_ADDR: 4063 case MSR_IA32_P5_MC_TYPE: 4064 case MSR_IA32_MCG_CAP: 4065 case MSR_IA32_MCG_CTL: 4066 case MSR_IA32_MCG_STATUS: 4067 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 4068 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, 4069 msr_info->host_initiated); 4070 case MSR_IA32_XSS: 4071 if (!msr_info->host_initiated && 4072 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 4073 return 1; 4074 msr_info->data = vcpu->arch.ia32_xss; 4075 break; 4076 case MSR_K7_CLK_CTL: 4077 /* 4078 * Provide expected ramp-up count for K7. All other 4079 * are set to zero, indicating minimum divisors for 4080 * every field. 4081 * 4082 * This prevents guest kernels on AMD host with CPU 4083 * type 6, model 8 and higher from exploding due to 4084 * the rdmsr failing. 4085 */ 4086 msr_info->data = 0x20000000; 4087 break; 4088 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 4089 case HV_X64_MSR_SYNDBG_CONTROL ... 
HV_X64_MSR_SYNDBG_PENDING_BUFFER: 4090 case HV_X64_MSR_SYNDBG_OPTIONS: 4091 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 4092 case HV_X64_MSR_CRASH_CTL: 4093 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 4094 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 4095 case HV_X64_MSR_TSC_EMULATION_CONTROL: 4096 case HV_X64_MSR_TSC_EMULATION_STATUS: 4097 return kvm_hv_get_msr_common(vcpu, 4098 msr_info->index, &msr_info->data, 4099 msr_info->host_initiated); 4100 case MSR_IA32_BBL_CR_CTL3: 4101 /* This legacy MSR exists but isn't fully documented in current 4102 * silicon. It is however accessed by winxp in very narrow 4103 * scenarios where it sets bit #19, itself documented as 4104 * a "reserved" bit. Best effort attempt to source coherent 4105 * read data here should the balance of the register be 4106 * interpreted by the guest: 4107 * 4108 * L2 cache control register 3: 64GB range, 256KB size, 4109 * enabled, latency 0x1, configured 4110 */ 4111 msr_info->data = 0xbe702111; 4112 break; 4113 case MSR_AMD64_OSVW_ID_LENGTH: 4114 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4115 return 1; 4116 msr_info->data = vcpu->arch.osvw.length; 4117 break; 4118 case MSR_AMD64_OSVW_STATUS: 4119 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4120 return 1; 4121 msr_info->data = vcpu->arch.osvw.status; 4122 break; 4123 case MSR_PLATFORM_INFO: 4124 if (!msr_info->host_initiated && 4125 !vcpu->kvm->arch.guest_can_read_msr_platform_info) 4126 return 1; 4127 msr_info->data = vcpu->arch.msr_platform_info; 4128 break; 4129 case MSR_MISC_FEATURES_ENABLES: 4130 msr_info->data = vcpu->arch.msr_misc_features_enables; 4131 break; 4132 case MSR_K7_HWCR: 4133 msr_info->data = vcpu->arch.msr_hwcr; 4134 break; 4135 #ifdef CONFIG_X86_64 4136 case MSR_IA32_XFD: 4137 if (!msr_info->host_initiated && 4138 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4139 return 1; 4140 4141 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; 4142 break; 4143 case MSR_IA32_XFD_ERR: 4144 if (!msr_info->host_initiated && 4145 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4146 return 1; 4147 4148 msr_info->data = vcpu->arch.guest_fpu.xfd_err; 4149 break; 4150 #endif 4151 default: 4152 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 4153 return kvm_pmu_get_msr(vcpu, msr_info); 4154 return KVM_MSR_RET_INVALID; 4155 } 4156 return 0; 4157 } 4158 EXPORT_SYMBOL_GPL(kvm_get_msr_common); 4159 4160 /* 4161 * Read or write a bunch of msrs. All parameters are kernel addresses. 4162 * 4163 * @return number of msrs set successfully. 4164 */ 4165 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, 4166 struct kvm_msr_entry *entries, 4167 int (*do_msr)(struct kvm_vcpu *vcpu, 4168 unsigned index, u64 *data)) 4169 { 4170 int i; 4171 4172 for (i = 0; i < msrs->nmsrs; ++i) 4173 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 4174 break; 4175 4176 return i; 4177 } 4178 4179 /* 4180 * Read or write a bunch of msrs. Parameters are user addresses. 4181 * 4182 * @return number of msrs set successfully. 
4183 */ 4184 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, 4185 int (*do_msr)(struct kvm_vcpu *vcpu, 4186 unsigned index, u64 *data), 4187 int writeback) 4188 { 4189 struct kvm_msrs msrs; 4190 struct kvm_msr_entry *entries; 4191 int r, n; 4192 unsigned size; 4193 4194 r = -EFAULT; 4195 if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) 4196 goto out; 4197 4198 r = -E2BIG; 4199 if (msrs.nmsrs >= MAX_IO_MSRS) 4200 goto out; 4201 4202 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; 4203 entries = memdup_user(user_msrs->entries, size); 4204 if (IS_ERR(entries)) { 4205 r = PTR_ERR(entries); 4206 goto out; 4207 } 4208 4209 r = n = __msr_io(vcpu, &msrs, entries, do_msr); 4210 if (r < 0) 4211 goto out_free; 4212 4213 r = -EFAULT; 4214 if (writeback && copy_to_user(user_msrs->entries, entries, size)) 4215 goto out_free; 4216 4217 r = n; 4218 4219 out_free: 4220 kfree(entries); 4221 out: 4222 return r; 4223 } 4224 4225 static inline bool kvm_can_mwait_in_guest(void) 4226 { 4227 return boot_cpu_has(X86_FEATURE_MWAIT) && 4228 !boot_cpu_has_bug(X86_BUG_MONITOR) && 4229 boot_cpu_has(X86_FEATURE_ARAT); 4230 } 4231 4232 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, 4233 struct kvm_cpuid2 __user *cpuid_arg) 4234 { 4235 struct kvm_cpuid2 cpuid; 4236 int r; 4237 4238 r = -EFAULT; 4239 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4240 return r; 4241 4242 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); 4243 if (r) 4244 return r; 4245 4246 r = -EFAULT; 4247 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 4248 return r; 4249 4250 return 0; 4251 } 4252 4253 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 4254 { 4255 int r = 0; 4256 4257 switch (ext) { 4258 case KVM_CAP_IRQCHIP: 4259 case KVM_CAP_HLT: 4260 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 4261 case KVM_CAP_SET_TSS_ADDR: 4262 case KVM_CAP_EXT_CPUID: 4263 case KVM_CAP_EXT_EMUL_CPUID: 4264 case KVM_CAP_CLOCKSOURCE: 4265 case KVM_CAP_PIT: 4266 case KVM_CAP_NOP_IO_DELAY: 4267 case KVM_CAP_MP_STATE: 4268 case KVM_CAP_SYNC_MMU: 4269 case KVM_CAP_USER_NMI: 4270 case KVM_CAP_REINJECT_CONTROL: 4271 case KVM_CAP_IRQ_INJECT_STATUS: 4272 case KVM_CAP_IOEVENTFD: 4273 case KVM_CAP_IOEVENTFD_NO_LENGTH: 4274 case KVM_CAP_PIT2: 4275 case KVM_CAP_PIT_STATE2: 4276 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 4277 case KVM_CAP_VCPU_EVENTS: 4278 case KVM_CAP_HYPERV: 4279 case KVM_CAP_HYPERV_VAPIC: 4280 case KVM_CAP_HYPERV_SPIN: 4281 case KVM_CAP_HYPERV_SYNIC: 4282 case KVM_CAP_HYPERV_SYNIC2: 4283 case KVM_CAP_HYPERV_VP_INDEX: 4284 case KVM_CAP_HYPERV_EVENTFD: 4285 case KVM_CAP_HYPERV_TLBFLUSH: 4286 case KVM_CAP_HYPERV_SEND_IPI: 4287 case KVM_CAP_HYPERV_CPUID: 4288 case KVM_CAP_HYPERV_ENFORCE_CPUID: 4289 case KVM_CAP_SYS_HYPERV_CPUID: 4290 case KVM_CAP_PCI_SEGMENT: 4291 case KVM_CAP_DEBUGREGS: 4292 case KVM_CAP_X86_ROBUST_SINGLESTEP: 4293 case KVM_CAP_XSAVE: 4294 case KVM_CAP_ASYNC_PF: 4295 case KVM_CAP_ASYNC_PF_INT: 4296 case KVM_CAP_GET_TSC_KHZ: 4297 case KVM_CAP_KVMCLOCK_CTRL: 4298 case KVM_CAP_READONLY_MEM: 4299 case KVM_CAP_HYPERV_TIME: 4300 case KVM_CAP_IOAPIC_POLARITY_IGNORED: 4301 case KVM_CAP_TSC_DEADLINE_TIMER: 4302 case KVM_CAP_DISABLE_QUIRKS: 4303 case KVM_CAP_SET_BOOT_CPU_ID: 4304 case KVM_CAP_SPLIT_IRQCHIP: 4305 case KVM_CAP_IMMEDIATE_EXIT: 4306 case KVM_CAP_PMU_EVENT_FILTER: 4307 case KVM_CAP_GET_MSR_FEATURES: 4308 case KVM_CAP_MSR_PLATFORM_INFO: 4309 case KVM_CAP_EXCEPTION_PAYLOAD: 4310 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 4311 case KVM_CAP_SET_GUEST_DEBUG: 4312 case KVM_CAP_LAST_CPU: 
4313 case KVM_CAP_X86_USER_SPACE_MSR: 4314 case KVM_CAP_X86_MSR_FILTER: 4315 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 4316 #ifdef CONFIG_X86_SGX_KVM 4317 case KVM_CAP_SGX_ATTRIBUTE: 4318 #endif 4319 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 4320 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 4321 case KVM_CAP_SREGS2: 4322 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 4323 case KVM_CAP_VCPU_ATTRIBUTES: 4324 case KVM_CAP_SYS_ATTRIBUTES: 4325 case KVM_CAP_VAPIC: 4326 case KVM_CAP_ENABLE_CAP: 4327 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 4328 r = 1; 4329 break; 4330 case KVM_CAP_EXIT_HYPERCALL: 4331 r = KVM_EXIT_HYPERCALL_VALID_MASK; 4332 break; 4333 case KVM_CAP_SET_GUEST_DEBUG2: 4334 return KVM_GUESTDBG_VALID_MASK; 4335 #ifdef CONFIG_KVM_XEN 4336 case KVM_CAP_XEN_HVM: 4337 r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | 4338 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | 4339 KVM_XEN_HVM_CONFIG_SHARED_INFO | 4340 KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL | 4341 KVM_XEN_HVM_CONFIG_EVTCHN_SEND; 4342 if (sched_info_on()) 4343 r |= KVM_XEN_HVM_CONFIG_RUNSTATE; 4344 break; 4345 #endif 4346 case KVM_CAP_SYNC_REGS: 4347 r = KVM_SYNC_X86_VALID_FIELDS; 4348 break; 4349 case KVM_CAP_ADJUST_CLOCK: 4350 r = KVM_CLOCK_VALID_FLAGS; 4351 break; 4352 case KVM_CAP_X86_DISABLE_EXITS: 4353 r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE | 4354 KVM_X86_DISABLE_EXITS_CSTATE; 4355 if(kvm_can_mwait_in_guest()) 4356 r |= KVM_X86_DISABLE_EXITS_MWAIT; 4357 break; 4358 case KVM_CAP_X86_SMM: 4359 /* SMBASE is usually relocated above 1M on modern chipsets, 4360 * and SMM handlers might indeed rely on 4G segment limits, 4361 * so do not report SMM to be available if real mode is 4362 * emulated via vm86 mode. Still, do not go to great lengths 4363 * to avoid userspace's usage of the feature, because it is a 4364 * fringe case that is not enabled except via specific settings 4365 * of the module parameters. 4366 */ 4367 r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); 4368 break; 4369 case KVM_CAP_NR_VCPUS: 4370 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); 4371 break; 4372 case KVM_CAP_MAX_VCPUS: 4373 r = KVM_MAX_VCPUS; 4374 break; 4375 case KVM_CAP_MAX_VCPU_ID: 4376 r = KVM_MAX_VCPU_IDS; 4377 break; 4378 case KVM_CAP_PV_MMU: /* obsolete */ 4379 r = 0; 4380 break; 4381 case KVM_CAP_MCE: 4382 r = KVM_MAX_MCE_BANKS; 4383 break; 4384 case KVM_CAP_XCRS: 4385 r = boot_cpu_has(X86_FEATURE_XSAVE); 4386 break; 4387 case KVM_CAP_TSC_CONTROL: 4388 case KVM_CAP_VM_TSC_CONTROL: 4389 r = kvm_caps.has_tsc_control; 4390 break; 4391 case KVM_CAP_X2APIC_API: 4392 r = KVM_X2APIC_API_VALID_FLAGS; 4393 break; 4394 case KVM_CAP_NESTED_STATE: 4395 r = kvm_x86_ops.nested_ops->get_state ? 
4396 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; 4397 break; 4398 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 4399 r = kvm_x86_ops.enable_direct_tlbflush != NULL; 4400 break; 4401 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 4402 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; 4403 break; 4404 case KVM_CAP_SMALLER_MAXPHYADDR: 4405 r = (int) allow_smaller_maxphyaddr; 4406 break; 4407 case KVM_CAP_STEAL_TIME: 4408 r = sched_info_on(); 4409 break; 4410 case KVM_CAP_X86_BUS_LOCK_EXIT: 4411 if (kvm_caps.has_bus_lock_exit) 4412 r = KVM_BUS_LOCK_DETECTION_OFF | 4413 KVM_BUS_LOCK_DETECTION_EXIT; 4414 else 4415 r = 0; 4416 break; 4417 case KVM_CAP_XSAVE2: { 4418 u64 guest_perm = xstate_get_guest_group_perm(); 4419 4420 r = xstate_required_size(kvm_caps.supported_xcr0 & guest_perm, false); 4421 if (r < sizeof(struct kvm_xsave)) 4422 r = sizeof(struct kvm_xsave); 4423 break; 4424 } 4425 case KVM_CAP_PMU_CAPABILITY: 4426 r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0; 4427 break; 4428 case KVM_CAP_DISABLE_QUIRKS2: 4429 r = KVM_X86_VALID_QUIRKS; 4430 break; 4431 case KVM_CAP_X86_NOTIFY_VMEXIT: 4432 r = kvm_caps.has_notify_vmexit; 4433 break; 4434 default: 4435 break; 4436 } 4437 return r; 4438 } 4439 4440 static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr) 4441 { 4442 void __user *uaddr = (void __user*)(unsigned long)attr->addr; 4443 4444 if ((u64)(unsigned long)uaddr != attr->addr) 4445 return ERR_PTR_USR(-EFAULT); 4446 return uaddr; 4447 } 4448 4449 static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr) 4450 { 4451 u64 __user *uaddr = kvm_get_attr_addr(attr); 4452 4453 if (attr->group) 4454 return -ENXIO; 4455 4456 if (IS_ERR(uaddr)) 4457 return PTR_ERR(uaddr); 4458 4459 switch (attr->attr) { 4460 case KVM_X86_XCOMP_GUEST_SUPP: 4461 if (put_user(kvm_caps.supported_xcr0, uaddr)) 4462 return -EFAULT; 4463 return 0; 4464 default: 4465 return -ENXIO; 4466 break; 4467 } 4468 } 4469 4470 static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr) 4471 { 4472 if (attr->group) 4473 return -ENXIO; 4474 4475 switch (attr->attr) { 4476 case KVM_X86_XCOMP_GUEST_SUPP: 4477 return 0; 4478 default: 4479 return -ENXIO; 4480 } 4481 } 4482 4483 long kvm_arch_dev_ioctl(struct file *filp, 4484 unsigned int ioctl, unsigned long arg) 4485 { 4486 void __user *argp = (void __user *)arg; 4487 long r; 4488 4489 switch (ioctl) { 4490 case KVM_GET_MSR_INDEX_LIST: { 4491 struct kvm_msr_list __user *user_msr_list = argp; 4492 struct kvm_msr_list msr_list; 4493 unsigned n; 4494 4495 r = -EFAULT; 4496 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4497 goto out; 4498 n = msr_list.nmsrs; 4499 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; 4500 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4501 goto out; 4502 r = -E2BIG; 4503 if (n < msr_list.nmsrs) 4504 goto out; 4505 r = -EFAULT; 4506 if (copy_to_user(user_msr_list->indices, &msrs_to_save, 4507 num_msrs_to_save * sizeof(u32))) 4508 goto out; 4509 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, 4510 &emulated_msrs, 4511 num_emulated_msrs * sizeof(u32))) 4512 goto out; 4513 r = 0; 4514 break; 4515 } 4516 case KVM_GET_SUPPORTED_CPUID: 4517 case KVM_GET_EMULATED_CPUID: { 4518 struct kvm_cpuid2 __user *cpuid_arg = argp; 4519 struct kvm_cpuid2 cpuid; 4520 4521 r = -EFAULT; 4522 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4523 goto out; 4524 4525 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, 4526 ioctl); 4527 if (r) 4528 goto out; 4529 4530 r = -EFAULT; 4531 if (copy_to_user(cpuid_arg, 
&cpuid, sizeof(cpuid))) 4532 goto out; 4533 r = 0; 4534 break; 4535 } 4536 case KVM_X86_GET_MCE_CAP_SUPPORTED: 4537 r = -EFAULT; 4538 if (copy_to_user(argp, &kvm_caps.supported_mce_cap, 4539 sizeof(kvm_caps.supported_mce_cap))) 4540 goto out; 4541 r = 0; 4542 break; 4543 case KVM_GET_MSR_FEATURE_INDEX_LIST: { 4544 struct kvm_msr_list __user *user_msr_list = argp; 4545 struct kvm_msr_list msr_list; 4546 unsigned int n; 4547 4548 r = -EFAULT; 4549 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4550 goto out; 4551 n = msr_list.nmsrs; 4552 msr_list.nmsrs = num_msr_based_features; 4553 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4554 goto out; 4555 r = -E2BIG; 4556 if (n < msr_list.nmsrs) 4557 goto out; 4558 r = -EFAULT; 4559 if (copy_to_user(user_msr_list->indices, &msr_based_features, 4560 num_msr_based_features * sizeof(u32))) 4561 goto out; 4562 r = 0; 4563 break; 4564 } 4565 case KVM_GET_MSRS: 4566 r = msr_io(NULL, argp, do_get_msr_feature, 1); 4567 break; 4568 case KVM_GET_SUPPORTED_HV_CPUID: 4569 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp); 4570 break; 4571 case KVM_GET_DEVICE_ATTR: { 4572 struct kvm_device_attr attr; 4573 r = -EFAULT; 4574 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4575 break; 4576 r = kvm_x86_dev_get_attr(&attr); 4577 break; 4578 } 4579 case KVM_HAS_DEVICE_ATTR: { 4580 struct kvm_device_attr attr; 4581 r = -EFAULT; 4582 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4583 break; 4584 r = kvm_x86_dev_has_attr(&attr); 4585 break; 4586 } 4587 default: 4588 r = -EINVAL; 4589 break; 4590 } 4591 out: 4592 return r; 4593 } 4594 4595 static void wbinvd_ipi(void *garbage) 4596 { 4597 wbinvd(); 4598 } 4599 4600 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) 4601 { 4602 return kvm_arch_has_noncoherent_dma(vcpu->kvm); 4603 } 4604 4605 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 4606 { 4607 /* Address WBINVD may be executed by guest */ 4608 if (need_emulate_wbinvd(vcpu)) { 4609 if (static_call(kvm_x86_has_wbinvd_exit)()) 4610 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 4611 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) 4612 smp_call_function_single(vcpu->cpu, 4613 wbinvd_ipi, NULL, 1); 4614 } 4615 4616 static_call(kvm_x86_vcpu_load)(vcpu, cpu); 4617 4618 /* Save host pkru register if supported */ 4619 vcpu->arch.host_pkru = read_pkru(); 4620 4621 /* Apply any externally detected TSC adjustments (due to suspend) */ 4622 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { 4623 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); 4624 vcpu->arch.tsc_offset_adjustment = 0; 4625 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 4626 } 4627 4628 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { 4629 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : 4630 rdtsc() - vcpu->arch.last_host_tsc; 4631 if (tsc_delta < 0) 4632 mark_tsc_unstable("KVM discovered backwards TSC"); 4633 4634 if (kvm_check_tsc_unstable()) { 4635 u64 offset = kvm_compute_l1_tsc_offset(vcpu, 4636 vcpu->arch.last_guest_tsc); 4637 kvm_vcpu_write_tsc_offset(vcpu, offset); 4638 vcpu->arch.tsc_catchup = 1; 4639 } 4640 4641 if (kvm_lapic_hv_timer_in_use(vcpu)) 4642 kvm_lapic_restart_hv_timer(vcpu); 4643 4644 /* 4645 * On a host with synchronized TSC, there is no need to update 4646 * kvmclock on vcpu->cpu migration 4647 */ 4648 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) 4649 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 4650 if (vcpu->cpu != cpu) 4651 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); 4652 vcpu->cpu = cpu; 4653 } 4654 4655 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 4656 } 4657 4658 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) 4659 { 4660 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 4661 struct kvm_steal_time __user *st; 4662 struct kvm_memslots *slots; 4663 static const u8 preempted = KVM_VCPU_PREEMPTED; 4664 4665 /* 4666 * The vCPU can be marked preempted if and only if the VM-Exit was on 4667 * an instruction boundary and will not trigger guest emulation of any 4668 * kind (see vcpu_run). Vendor specific code controls (conservatively) 4669 * when this is true, for example allowing the vCPU to be marked 4670 * preempted if and only if the VM-Exit was due to a host interrupt. 4671 */ 4672 if (!vcpu->arch.at_instruction_boundary) { 4673 vcpu->stat.preemption_other++; 4674 return; 4675 } 4676 4677 vcpu->stat.preemption_reported++; 4678 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 4679 return; 4680 4681 if (vcpu->arch.st.preempted) 4682 return; 4683 4684 /* This happens on process exit */ 4685 if (unlikely(current->mm != vcpu->kvm->mm)) 4686 return; 4687 4688 slots = kvm_memslots(vcpu->kvm); 4689 4690 if (unlikely(slots->generation != ghc->generation || 4691 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) 4692 return; 4693 4694 st = (struct kvm_steal_time __user *)ghc->hva; 4695 BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted)); 4696 4697 if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted))) 4698 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; 4699 4700 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 4701 } 4702 4703 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 4704 { 4705 int idx; 4706 4707 if (vcpu->preempted) { 4708 if (!vcpu->arch.guest_state_protected) 4709 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); 4710 4711 /* 4712 * Take the srcu lock as memslots will be accessed to check the gfn 4713 * cache generation against the memslots generation. 
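* Both the Xen runstate path and the steal-time path below may write guest memory, hence the need to hold the SRCU read lock around them.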
4714 */ 4715 idx = srcu_read_lock(&vcpu->kvm->srcu); 4716 if (kvm_xen_msr_enabled(vcpu->kvm)) 4717 kvm_xen_runstate_set_preempted(vcpu); 4718 else 4719 kvm_steal_time_set_preempted(vcpu); 4720 srcu_read_unlock(&vcpu->kvm->srcu, idx); 4721 } 4722 4723 static_call(kvm_x86_vcpu_put)(vcpu); 4724 vcpu->arch.last_host_tsc = rdtsc(); 4725 } 4726 4727 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 4728 struct kvm_lapic_state *s) 4729 { 4730 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 4731 4732 return kvm_apic_get_state(vcpu, s); 4733 } 4734 4735 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, 4736 struct kvm_lapic_state *s) 4737 { 4738 int r; 4739 4740 r = kvm_apic_set_state(vcpu, s); 4741 if (r) 4742 return r; 4743 update_cr8_intercept(vcpu); 4744 4745 return 0; 4746 } 4747 4748 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) 4749 { 4750 /* 4751 * We can accept userspace's request for interrupt injection 4752 * as long as we have a place to store the interrupt number. 4753 * The actual injection will happen when the CPU is able to 4754 * deliver the interrupt. 4755 */ 4756 if (kvm_cpu_has_extint(vcpu)) 4757 return false; 4758 4759 /* Acknowledging ExtINT does not happen if LINT0 is masked. */ 4760 return (!lapic_in_kernel(vcpu) || 4761 kvm_apic_accept_pic_intr(vcpu)); 4762 } 4763 4764 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) 4765 { 4766 /* 4767 * Do not cause an interrupt window exit if an exception 4768 * is pending or an event needs reinjection; userspace 4769 * might want to inject the interrupt manually using KVM_SET_REGS 4770 * or KVM_SET_SREGS. For that to work, we must be at an 4771 * instruction boundary and with no events half-injected. 4772 */ 4773 return (kvm_arch_interrupt_allowed(vcpu) && 4774 kvm_cpu_accept_dm_intr(vcpu) && 4775 !kvm_event_needs_reinjection(vcpu) && 4776 !vcpu->arch.exception.pending); 4777 } 4778 4779 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 4780 struct kvm_interrupt *irq) 4781 { 4782 if (irq->irq >= KVM_NR_INTERRUPTS) 4783 return -EINVAL; 4784 4785 if (!irqchip_in_kernel(vcpu->kvm)) { 4786 kvm_queue_interrupt(vcpu, irq->irq, false); 4787 kvm_make_request(KVM_REQ_EVENT, vcpu); 4788 return 0; 4789 } 4790 4791 /* 4792 * With in-kernel LAPIC, we only use this to inject EXTINT, so 4793 * fail for in-kernel 8259. 
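* When the PIC itself is emulated in the kernel, ExtINT delivery is the PIC's job, so KVM_INTERRUPT is rejected in that configuration.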
4794 */ 4795 if (pic_in_kernel(vcpu->kvm)) 4796 return -ENXIO; 4797 4798 if (vcpu->arch.pending_external_vector != -1) 4799 return -EEXIST; 4800 4801 vcpu->arch.pending_external_vector = irq->irq; 4802 kvm_make_request(KVM_REQ_EVENT, vcpu); 4803 return 0; 4804 } 4805 4806 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) 4807 { 4808 kvm_inject_nmi(vcpu); 4809 4810 return 0; 4811 } 4812 4813 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu) 4814 { 4815 kvm_make_request(KVM_REQ_SMI, vcpu); 4816 4817 return 0; 4818 } 4819 4820 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, 4821 struct kvm_tpr_access_ctl *tac) 4822 { 4823 if (tac->flags) 4824 return -EINVAL; 4825 vcpu->arch.tpr_access_reporting = !!tac->enabled; 4826 return 0; 4827 } 4828 4829 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, 4830 u64 mcg_cap) 4831 { 4832 int r; 4833 unsigned bank_num = mcg_cap & 0xff, bank; 4834 4835 r = -EINVAL; 4836 if (!bank_num || bank_num > KVM_MAX_MCE_BANKS) 4837 goto out; 4838 if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000)) 4839 goto out; 4840 r = 0; 4841 vcpu->arch.mcg_cap = mcg_cap; 4842 /* Init IA32_MCG_CTL to all 1s */ 4843 if (mcg_cap & MCG_CTL_P) 4844 vcpu->arch.mcg_ctl = ~(u64)0; 4845 /* Init IA32_MCi_CTL to all 1s */ 4846 for (bank = 0; bank < bank_num; bank++) 4847 vcpu->arch.mce_banks[bank*4] = ~(u64)0; 4848 4849 static_call(kvm_x86_setup_mce)(vcpu); 4850 out: 4851 return r; 4852 } 4853 4854 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, 4855 struct kvm_x86_mce *mce) 4856 { 4857 u64 mcg_cap = vcpu->arch.mcg_cap; 4858 unsigned bank_num = mcg_cap & 0xff; 4859 u64 *banks = vcpu->arch.mce_banks; 4860 4861 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) 4862 return -EINVAL; 4863 /* 4864 * if IA32_MCG_CTL is not all 1s, the uncorrected error 4865 * reporting is disabled 4866 */ 4867 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && 4868 vcpu->arch.mcg_ctl != ~(u64)0) 4869 return 0; 4870 banks += 4 * mce->bank; 4871 /* 4872 * if IA32_MCi_CTL is not all 1s, the uncorrected error 4873 * reporting is disabled for the bank 4874 */ 4875 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) 4876 return 0; 4877 if (mce->status & MCI_STATUS_UC) { 4878 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || 4879 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { 4880 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 4881 return 0; 4882 } 4883 if (banks[1] & MCI_STATUS_VAL) 4884 mce->status |= MCI_STATUS_OVER; 4885 banks[2] = mce->addr; 4886 banks[3] = mce->misc; 4887 vcpu->arch.mcg_status = mce->mcg_status; 4888 banks[1] = mce->status; 4889 kvm_queue_exception(vcpu, MC_VECTOR); 4890 } else if (!(banks[1] & MCI_STATUS_VAL) 4891 || !(banks[1] & MCI_STATUS_UC)) { 4892 if (banks[1] & MCI_STATUS_VAL) 4893 mce->status |= MCI_STATUS_OVER; 4894 banks[2] = mce->addr; 4895 banks[3] = mce->misc; 4896 banks[1] = mce->status; 4897 } else 4898 banks[1] |= MCI_STATUS_OVER; 4899 return 0; 4900 } 4901 4902 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, 4903 struct kvm_vcpu_events *events) 4904 { 4905 process_nmi(vcpu); 4906 4907 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 4908 process_smi(vcpu); 4909 4910 /* 4911 * In guest mode, payload delivery should be deferred, 4912 * so that the L1 hypervisor can intercept #PF before 4913 * CR2 is modified (or intercept #DB before DR6 is 4914 * modified under nVMX). 
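* (The payload is, for example, the #PF faulting address destined for CR2, or the #DB status bits destined for DR6.)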
Unless the per-VM capability, 4915 * KVM_CAP_EXCEPTION_PAYLOAD, is set, we may not defer the delivery of 4916 * an exception payload and handle after a KVM_GET_VCPU_EVENTS. Since we 4917 * opportunistically defer the exception payload, deliver it if the 4918 * capability hasn't been requested before processing a 4919 * KVM_GET_VCPU_EVENTS. 4920 */ 4921 if (!vcpu->kvm->arch.exception_payload_enabled && 4922 vcpu->arch.exception.pending && vcpu->arch.exception.has_payload) 4923 kvm_deliver_exception_payload(vcpu); 4924 4925 /* 4926 * The API doesn't provide the instruction length for software 4927 * exceptions, so don't report them. As long as the guest RIP 4928 * isn't advanced, we should expect to encounter the exception 4929 * again. 4930 */ 4931 if (kvm_exception_is_soft(vcpu->arch.exception.nr)) { 4932 events->exception.injected = 0; 4933 events->exception.pending = 0; 4934 } else { 4935 events->exception.injected = vcpu->arch.exception.injected; 4936 events->exception.pending = vcpu->arch.exception.pending; 4937 /* 4938 * For ABI compatibility, deliberately conflate 4939 * pending and injected exceptions when 4940 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled. 4941 */ 4942 if (!vcpu->kvm->arch.exception_payload_enabled) 4943 events->exception.injected |= 4944 vcpu->arch.exception.pending; 4945 } 4946 events->exception.nr = vcpu->arch.exception.nr; 4947 events->exception.has_error_code = vcpu->arch.exception.has_error_code; 4948 events->exception.error_code = vcpu->arch.exception.error_code; 4949 events->exception_has_payload = vcpu->arch.exception.has_payload; 4950 events->exception_payload = vcpu->arch.exception.payload; 4951 4952 events->interrupt.injected = 4953 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; 4954 events->interrupt.nr = vcpu->arch.interrupt.nr; 4955 events->interrupt.soft = 0; 4956 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 4957 4958 events->nmi.injected = vcpu->arch.nmi_injected; 4959 events->nmi.pending = vcpu->arch.nmi_pending != 0; 4960 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); 4961 events->nmi.pad = 0; 4962 4963 events->sipi_vector = 0; /* never valid when reporting to user space */ 4964 4965 events->smi.smm = is_smm(vcpu); 4966 events->smi.pending = vcpu->arch.smi_pending; 4967 events->smi.smm_inside_nmi = 4968 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); 4969 events->smi.latched_init = kvm_lapic_latched_init(vcpu); 4970 4971 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 4972 | KVM_VCPUEVENT_VALID_SHADOW 4973 | KVM_VCPUEVENT_VALID_SMM); 4974 if (vcpu->kvm->arch.exception_payload_enabled) 4975 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; 4976 if (vcpu->kvm->arch.triple_fault_event) { 4977 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu); 4978 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; 4979 } 4980 4981 memset(&events->reserved, 0, sizeof(events->reserved)); 4982 } 4983 4984 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm); 4985 4986 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 4987 struct kvm_vcpu_events *events) 4988 { 4989 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING 4990 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 4991 | KVM_VCPUEVENT_VALID_SHADOW 4992 | KVM_VCPUEVENT_VALID_SMM 4993 | KVM_VCPUEVENT_VALID_PAYLOAD 4994 | KVM_VCPUEVENT_VALID_TRIPLE_FAULT)) 4995 return -EINVAL; 4996 4997 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { 4998 if (!vcpu->kvm->arch.exception_payload_enabled) 4999 return -EINVAL; 5000 if 
(events->exception.pending) 5001 events->exception.injected = 0; 5002 else 5003 events->exception_has_payload = 0; 5004 } else { 5005 events->exception.pending = 0; 5006 events->exception_has_payload = 0; 5007 } 5008 5009 if ((events->exception.injected || events->exception.pending) && 5010 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) 5011 return -EINVAL; 5012 5013 /* INITs are latched while in SMM */ 5014 if (events->flags & KVM_VCPUEVENT_VALID_SMM && 5015 (events->smi.smm || events->smi.pending) && 5016 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 5017 return -EINVAL; 5018 5019 process_nmi(vcpu); 5020 vcpu->arch.exception.injected = events->exception.injected; 5021 vcpu->arch.exception.pending = events->exception.pending; 5022 vcpu->arch.exception.nr = events->exception.nr; 5023 vcpu->arch.exception.has_error_code = events->exception.has_error_code; 5024 vcpu->arch.exception.error_code = events->exception.error_code; 5025 vcpu->arch.exception.has_payload = events->exception_has_payload; 5026 vcpu->arch.exception.payload = events->exception_payload; 5027 5028 vcpu->arch.interrupt.injected = events->interrupt.injected; 5029 vcpu->arch.interrupt.nr = events->interrupt.nr; 5030 vcpu->arch.interrupt.soft = events->interrupt.soft; 5031 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) 5032 static_call(kvm_x86_set_interrupt_shadow)(vcpu, 5033 events->interrupt.shadow); 5034 5035 vcpu->arch.nmi_injected = events->nmi.injected; 5036 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) 5037 vcpu->arch.nmi_pending = events->nmi.pending; 5038 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); 5039 5040 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && 5041 lapic_in_kernel(vcpu)) 5042 vcpu->arch.apic->sipi_vector = events->sipi_vector; 5043 5044 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { 5045 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { 5046 kvm_x86_ops.nested_ops->leave_nested(vcpu); 5047 kvm_smm_changed(vcpu, events->smi.smm); 5048 } 5049 5050 vcpu->arch.smi_pending = events->smi.pending; 5051 5052 if (events->smi.smm) { 5053 if (events->smi.smm_inside_nmi) 5054 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 5055 else 5056 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; 5057 } 5058 5059 if (lapic_in_kernel(vcpu)) { 5060 if (events->smi.latched_init) 5061 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5062 else 5063 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5064 } 5065 } 5066 5067 if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) { 5068 if (!vcpu->kvm->arch.triple_fault_event) 5069 return -EINVAL; 5070 if (events->triple_fault.pending) 5071 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5072 else 5073 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5074 } 5075 5076 kvm_make_request(KVM_REQ_EVENT, vcpu); 5077 5078 return 0; 5079 } 5080 5081 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, 5082 struct kvm_debugregs *dbgregs) 5083 { 5084 unsigned long val; 5085 5086 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); 5087 kvm_get_dr(vcpu, 6, &val); 5088 dbgregs->dr6 = val; 5089 dbgregs->dr7 = vcpu->arch.dr7; 5090 dbgregs->flags = 0; 5091 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); 5092 } 5093 5094 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 5095 struct kvm_debugregs *dbgregs) 5096 { 5097 if (dbgregs->flags) 5098 return -EINVAL; 5099 5100 if (!kvm_dr6_valid(dbgregs->dr6)) 5101 return -EINVAL; 5102 if (!kvm_dr7_valid(dbgregs->dr7)) 5103 return -EINVAL; 
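/* All validation passed: install DR0-DR3, DR6 and DR7, and let KVM refresh the derived/cached debug register state. */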
5104 5105 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); 5106 kvm_update_dr0123(vcpu); 5107 vcpu->arch.dr6 = dbgregs->dr6; 5108 vcpu->arch.dr7 = dbgregs->dr7; 5109 kvm_update_dr7(vcpu); 5110 5111 return 0; 5112 } 5113 5114 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, 5115 struct kvm_xsave *guest_xsave) 5116 { 5117 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5118 return; 5119 5120 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5121 guest_xsave->region, 5122 sizeof(guest_xsave->region), 5123 vcpu->arch.pkru); 5124 } 5125 5126 static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu, 5127 u8 *state, unsigned int size) 5128 { 5129 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5130 return; 5131 5132 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5133 state, size, vcpu->arch.pkru); 5134 } 5135 5136 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 5137 struct kvm_xsave *guest_xsave) 5138 { 5139 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5140 return 0; 5141 5142 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, 5143 guest_xsave->region, 5144 kvm_caps.supported_xcr0, 5145 &vcpu->arch.pkru); 5146 } 5147 5148 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, 5149 struct kvm_xcrs *guest_xcrs) 5150 { 5151 if (!boot_cpu_has(X86_FEATURE_XSAVE)) { 5152 guest_xcrs->nr_xcrs = 0; 5153 return; 5154 } 5155 5156 guest_xcrs->nr_xcrs = 1; 5157 guest_xcrs->flags = 0; 5158 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; 5159 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; 5160 } 5161 5162 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, 5163 struct kvm_xcrs *guest_xcrs) 5164 { 5165 int i, r = 0; 5166 5167 if (!boot_cpu_has(X86_FEATURE_XSAVE)) 5168 return -EINVAL; 5169 5170 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) 5171 return -EINVAL; 5172 5173 for (i = 0; i < guest_xcrs->nr_xcrs; i++) 5174 /* Only support XCR0 currently */ 5175 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { 5176 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, 5177 guest_xcrs->xcrs[i].value); 5178 break; 5179 } 5180 if (r) 5181 r = -EINVAL; 5182 return r; 5183 } 5184 5185 /* 5186 * kvm_set_guest_paused() indicates to the guest kernel that it has been 5187 * stopped by the hypervisor. This function will be called from the host only. 5188 * EINVAL is returned when the host attempts to set the flag for a guest that 5189 * does not support pv clocks. 
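* The guest observes the pause as the PVCLOCK_GUEST_STOPPED flag in its pvclock structure on the next clock update, e.g. so its soft-lockup watchdog can discount the stopped time.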
5190 */ 5191 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 5192 { 5193 if (!vcpu->arch.pv_time.active) 5194 return -EINVAL; 5195 vcpu->arch.pvclock_set_guest_stopped_request = true; 5196 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 5197 return 0; 5198 } 5199 5200 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu, 5201 struct kvm_device_attr *attr) 5202 { 5203 int r; 5204 5205 switch (attr->attr) { 5206 case KVM_VCPU_TSC_OFFSET: 5207 r = 0; 5208 break; 5209 default: 5210 r = -ENXIO; 5211 } 5212 5213 return r; 5214 } 5215 5216 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu, 5217 struct kvm_device_attr *attr) 5218 { 5219 u64 __user *uaddr = kvm_get_attr_addr(attr); 5220 int r; 5221 5222 if (IS_ERR(uaddr)) 5223 return PTR_ERR(uaddr); 5224 5225 switch (attr->attr) { 5226 case KVM_VCPU_TSC_OFFSET: 5227 r = -EFAULT; 5228 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) 5229 break; 5230 r = 0; 5231 break; 5232 default: 5233 r = -ENXIO; 5234 } 5235 5236 return r; 5237 } 5238 5239 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, 5240 struct kvm_device_attr *attr) 5241 { 5242 u64 __user *uaddr = kvm_get_attr_addr(attr); 5243 struct kvm *kvm = vcpu->kvm; 5244 int r; 5245 5246 if (IS_ERR(uaddr)) 5247 return PTR_ERR(uaddr); 5248 5249 switch (attr->attr) { 5250 case KVM_VCPU_TSC_OFFSET: { 5251 u64 offset, tsc, ns; 5252 unsigned long flags; 5253 bool matched; 5254 5255 r = -EFAULT; 5256 if (get_user(offset, uaddr)) 5257 break; 5258 5259 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 5260 5261 matched = (vcpu->arch.virtual_tsc_khz && 5262 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && 5263 kvm->arch.last_tsc_offset == offset); 5264 5265 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; 5266 ns = get_kvmclock_base_ns(); 5267 5268 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched); 5269 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 5270 5271 r = 0; 5272 break; 5273 } 5274 default: 5275 r = -ENXIO; 5276 } 5277 5278 return r; 5279 } 5280 5281 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu, 5282 unsigned int ioctl, 5283 void __user *argp) 5284 { 5285 struct kvm_device_attr attr; 5286 int r; 5287 5288 if (copy_from_user(&attr, argp, sizeof(attr))) 5289 return -EFAULT; 5290 5291 if (attr.group != KVM_VCPU_TSC_CTRL) 5292 return -ENXIO; 5293 5294 switch (ioctl) { 5295 case KVM_HAS_DEVICE_ATTR: 5296 r = kvm_arch_tsc_has_attr(vcpu, &attr); 5297 break; 5298 case KVM_GET_DEVICE_ATTR: 5299 r = kvm_arch_tsc_get_attr(vcpu, &attr); 5300 break; 5301 case KVM_SET_DEVICE_ATTR: 5302 r = kvm_arch_tsc_set_attr(vcpu, &attr); 5303 break; 5304 } 5305 5306 return r; 5307 } 5308 5309 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 5310 struct kvm_enable_cap *cap) 5311 { 5312 int r; 5313 uint16_t vmcs_version; 5314 void __user *user_ptr; 5315 5316 if (cap->flags) 5317 return -EINVAL; 5318 5319 switch (cap->cap) { 5320 case KVM_CAP_HYPERV_SYNIC2: 5321 if (cap->args[0]) 5322 return -EINVAL; 5323 fallthrough; 5324 5325 case KVM_CAP_HYPERV_SYNIC: 5326 if (!irqchip_in_kernel(vcpu->kvm)) 5327 return -EINVAL; 5328 return kvm_hv_activate_synic(vcpu, cap->cap == 5329 KVM_CAP_HYPERV_SYNIC2); 5330 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 5331 if (!kvm_x86_ops.nested_ops->enable_evmcs) 5332 return -ENOTTY; 5333 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); 5334 if (!r) { 5335 user_ptr = (void __user *)(uintptr_t)cap->args[0]; 5336 if (copy_to_user(user_ptr, &vmcs_version, 5337 sizeof(vmcs_version))) 5338 r = -EFAULT; 5339 
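/* If the copy failed, r reports -EFAULT; otherwise the negotiated eVMCS version has been handed back to userspace via cap->args[0]. */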
} 5340 return r; 5341 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 5342 if (!kvm_x86_ops.enable_direct_tlbflush) 5343 return -ENOTTY; 5344 5345 return static_call(kvm_x86_enable_direct_tlbflush)(vcpu); 5346 5347 case KVM_CAP_HYPERV_ENFORCE_CPUID: 5348 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); 5349 5350 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 5351 vcpu->arch.pv_cpuid.enforce = cap->args[0]; 5352 if (vcpu->arch.pv_cpuid.enforce) 5353 kvm_update_pv_runtime(vcpu); 5354 5355 return 0; 5356 default: 5357 return -EINVAL; 5358 } 5359 } 5360 5361 long kvm_arch_vcpu_ioctl(struct file *filp, 5362 unsigned int ioctl, unsigned long arg) 5363 { 5364 struct kvm_vcpu *vcpu = filp->private_data; 5365 void __user *argp = (void __user *)arg; 5366 int r; 5367 union { 5368 struct kvm_sregs2 *sregs2; 5369 struct kvm_lapic_state *lapic; 5370 struct kvm_xsave *xsave; 5371 struct kvm_xcrs *xcrs; 5372 void *buffer; 5373 } u; 5374 5375 vcpu_load(vcpu); 5376 5377 u.buffer = NULL; 5378 switch (ioctl) { 5379 case KVM_GET_LAPIC: { 5380 r = -EINVAL; 5381 if (!lapic_in_kernel(vcpu)) 5382 goto out; 5383 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), 5384 GFP_KERNEL_ACCOUNT); 5385 5386 r = -ENOMEM; 5387 if (!u.lapic) 5388 goto out; 5389 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); 5390 if (r) 5391 goto out; 5392 r = -EFAULT; 5393 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) 5394 goto out; 5395 r = 0; 5396 break; 5397 } 5398 case KVM_SET_LAPIC: { 5399 r = -EINVAL; 5400 if (!lapic_in_kernel(vcpu)) 5401 goto out; 5402 u.lapic = memdup_user(argp, sizeof(*u.lapic)); 5403 if (IS_ERR(u.lapic)) { 5404 r = PTR_ERR(u.lapic); 5405 goto out_nofree; 5406 } 5407 5408 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); 5409 break; 5410 } 5411 case KVM_INTERRUPT: { 5412 struct kvm_interrupt irq; 5413 5414 r = -EFAULT; 5415 if (copy_from_user(&irq, argp, sizeof(irq))) 5416 goto out; 5417 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 5418 break; 5419 } 5420 case KVM_NMI: { 5421 r = kvm_vcpu_ioctl_nmi(vcpu); 5422 break; 5423 } 5424 case KVM_SMI: { 5425 r = kvm_vcpu_ioctl_smi(vcpu); 5426 break; 5427 } 5428 case KVM_SET_CPUID: { 5429 struct kvm_cpuid __user *cpuid_arg = argp; 5430 struct kvm_cpuid cpuid; 5431 5432 r = -EFAULT; 5433 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5434 goto out; 5435 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); 5436 break; 5437 } 5438 case KVM_SET_CPUID2: { 5439 struct kvm_cpuid2 __user *cpuid_arg = argp; 5440 struct kvm_cpuid2 cpuid; 5441 5442 r = -EFAULT; 5443 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5444 goto out; 5445 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 5446 cpuid_arg->entries); 5447 break; 5448 } 5449 case KVM_GET_CPUID2: { 5450 struct kvm_cpuid2 __user *cpuid_arg = argp; 5451 struct kvm_cpuid2 cpuid; 5452 5453 r = -EFAULT; 5454 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5455 goto out; 5456 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 5457 cpuid_arg->entries); 5458 if (r) 5459 goto out; 5460 r = -EFAULT; 5461 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 5462 goto out; 5463 r = 0; 5464 break; 5465 } 5466 case KVM_GET_MSRS: { 5467 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5468 r = msr_io(vcpu, argp, do_get_msr, 1); 5469 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5470 break; 5471 } 5472 case KVM_SET_MSRS: { 5473 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5474 r = msr_io(vcpu, argp, do_set_msr, 0); 5475 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5476 break; 5477 } 5478 case KVM_TPR_ACCESS_REPORTING: { 5479 struct kvm_tpr_access_ctl tac; 
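/* Round trip: copy the control block in, update the TPR access reporting flag, then copy the block back out to userspace. */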
5480 5481 r = -EFAULT; 5482 if (copy_from_user(&tac, argp, sizeof(tac))) 5483 goto out; 5484 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 5485 if (r) 5486 goto out; 5487 r = -EFAULT; 5488 if (copy_to_user(argp, &tac, sizeof(tac))) 5489 goto out; 5490 r = 0; 5491 break; 5492 }; 5493 case KVM_SET_VAPIC_ADDR: { 5494 struct kvm_vapic_addr va; 5495 int idx; 5496 5497 r = -EINVAL; 5498 if (!lapic_in_kernel(vcpu)) 5499 goto out; 5500 r = -EFAULT; 5501 if (copy_from_user(&va, argp, sizeof(va))) 5502 goto out; 5503 idx = srcu_read_lock(&vcpu->kvm->srcu); 5504 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 5505 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5506 break; 5507 } 5508 case KVM_X86_SETUP_MCE: { 5509 u64 mcg_cap; 5510 5511 r = -EFAULT; 5512 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap))) 5513 goto out; 5514 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 5515 break; 5516 } 5517 case KVM_X86_SET_MCE: { 5518 struct kvm_x86_mce mce; 5519 5520 r = -EFAULT; 5521 if (copy_from_user(&mce, argp, sizeof(mce))) 5522 goto out; 5523 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 5524 break; 5525 } 5526 case KVM_GET_VCPU_EVENTS: { 5527 struct kvm_vcpu_events events; 5528 5529 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); 5530 5531 r = -EFAULT; 5532 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) 5533 break; 5534 r = 0; 5535 break; 5536 } 5537 case KVM_SET_VCPU_EVENTS: { 5538 struct kvm_vcpu_events events; 5539 5540 r = -EFAULT; 5541 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) 5542 break; 5543 5544 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); 5545 break; 5546 } 5547 case KVM_GET_DEBUGREGS: { 5548 struct kvm_debugregs dbgregs; 5549 5550 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); 5551 5552 r = -EFAULT; 5553 if (copy_to_user(argp, &dbgregs, 5554 sizeof(struct kvm_debugregs))) 5555 break; 5556 r = 0; 5557 break; 5558 } 5559 case KVM_SET_DEBUGREGS: { 5560 struct kvm_debugregs dbgregs; 5561 5562 r = -EFAULT; 5563 if (copy_from_user(&dbgregs, argp, 5564 sizeof(struct kvm_debugregs))) 5565 break; 5566 5567 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); 5568 break; 5569 } 5570 case KVM_GET_XSAVE: { 5571 r = -EINVAL; 5572 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) 5573 break; 5574 5575 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT); 5576 r = -ENOMEM; 5577 if (!u.xsave) 5578 break; 5579 5580 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); 5581 5582 r = -EFAULT; 5583 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) 5584 break; 5585 r = 0; 5586 break; 5587 } 5588 case KVM_SET_XSAVE: { 5589 int size = vcpu->arch.guest_fpu.uabi_size; 5590 5591 u.xsave = memdup_user(argp, size); 5592 if (IS_ERR(u.xsave)) { 5593 r = PTR_ERR(u.xsave); 5594 goto out_nofree; 5595 } 5596 5597 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); 5598 break; 5599 } 5600 5601 case KVM_GET_XSAVE2: { 5602 int size = vcpu->arch.guest_fpu.uabi_size; 5603 5604 u.xsave = kzalloc(size, GFP_KERNEL_ACCOUNT); 5605 r = -ENOMEM; 5606 if (!u.xsave) 5607 break; 5608 5609 kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size); 5610 5611 r = -EFAULT; 5612 if (copy_to_user(argp, u.xsave, size)) 5613 break; 5614 5615 r = 0; 5616 break; 5617 } 5618 5619 case KVM_GET_XCRS: { 5620 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT); 5621 r = -ENOMEM; 5622 if (!u.xcrs) 5623 break; 5624 5625 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); 5626 5627 r = -EFAULT; 5628 if (copy_to_user(argp, u.xcrs, 5629 sizeof(struct kvm_xcrs))) 5630 break; 5631 r = 0; 
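/* Success: the single supported XCR (XCR0) has been copied out to userspace. */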
5632 break; 5633 } 5634 case KVM_SET_XCRS: { 5635 u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); 5636 if (IS_ERR(u.xcrs)) { 5637 r = PTR_ERR(u.xcrs); 5638 goto out_nofree; 5639 } 5640 5641 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); 5642 break; 5643 } 5644 case KVM_SET_TSC_KHZ: { 5645 u32 user_tsc_khz; 5646 5647 r = -EINVAL; 5648 user_tsc_khz = (u32)arg; 5649 5650 if (kvm_caps.has_tsc_control && 5651 user_tsc_khz >= kvm_caps.max_guest_tsc_khz) 5652 goto out; 5653 5654 if (user_tsc_khz == 0) 5655 user_tsc_khz = tsc_khz; 5656 5657 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) 5658 r = 0; 5659 5660 goto out; 5661 } 5662 case KVM_GET_TSC_KHZ: { 5663 r = vcpu->arch.virtual_tsc_khz; 5664 goto out; 5665 } 5666 case KVM_KVMCLOCK_CTRL: { 5667 r = kvm_set_guest_paused(vcpu); 5668 goto out; 5669 } 5670 case KVM_ENABLE_CAP: { 5671 struct kvm_enable_cap cap; 5672 5673 r = -EFAULT; 5674 if (copy_from_user(&cap, argp, sizeof(cap))) 5675 goto out; 5676 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 5677 break; 5678 } 5679 case KVM_GET_NESTED_STATE: { 5680 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5681 u32 user_data_size; 5682 5683 r = -EINVAL; 5684 if (!kvm_x86_ops.nested_ops->get_state) 5685 break; 5686 5687 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); 5688 r = -EFAULT; 5689 if (get_user(user_data_size, &user_kvm_nested_state->size)) 5690 break; 5691 5692 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, 5693 user_data_size); 5694 if (r < 0) 5695 break; 5696 5697 if (r > user_data_size) { 5698 if (put_user(r, &user_kvm_nested_state->size)) 5699 r = -EFAULT; 5700 else 5701 r = -E2BIG; 5702 break; 5703 } 5704 5705 r = 0; 5706 break; 5707 } 5708 case KVM_SET_NESTED_STATE: { 5709 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5710 struct kvm_nested_state kvm_state; 5711 int idx; 5712 5713 r = -EINVAL; 5714 if (!kvm_x86_ops.nested_ops->set_state) 5715 break; 5716 5717 r = -EFAULT; 5718 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) 5719 break; 5720 5721 r = -EINVAL; 5722 if (kvm_state.size < sizeof(kvm_state)) 5723 break; 5724 5725 if (kvm_state.flags & 5726 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE 5727 | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING 5728 | KVM_STATE_NESTED_GIF_SET)) 5729 break; 5730 5731 /* nested_run_pending implies guest_mode. 
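* Reject a state blob that claims a pending nested VM-entry while the vCPU is not in guest mode.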
*/ 5732 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING) 5733 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) 5734 break; 5735 5736 idx = srcu_read_lock(&vcpu->kvm->srcu); 5737 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); 5738 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5739 break; 5740 } 5741 case KVM_GET_SUPPORTED_HV_CPUID: 5742 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); 5743 break; 5744 #ifdef CONFIG_KVM_XEN 5745 case KVM_XEN_VCPU_GET_ATTR: { 5746 struct kvm_xen_vcpu_attr xva; 5747 5748 r = -EFAULT; 5749 if (copy_from_user(&xva, argp, sizeof(xva))) 5750 goto out; 5751 r = kvm_xen_vcpu_get_attr(vcpu, &xva); 5752 if (!r && copy_to_user(argp, &xva, sizeof(xva))) 5753 r = -EFAULT; 5754 break; 5755 } 5756 case KVM_XEN_VCPU_SET_ATTR: { 5757 struct kvm_xen_vcpu_attr xva; 5758 5759 r = -EFAULT; 5760 if (copy_from_user(&xva, argp, sizeof(xva))) 5761 goto out; 5762 r = kvm_xen_vcpu_set_attr(vcpu, &xva); 5763 break; 5764 } 5765 #endif 5766 case KVM_GET_SREGS2: { 5767 u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL); 5768 r = -ENOMEM; 5769 if (!u.sregs2) 5770 goto out; 5771 __get_sregs2(vcpu, u.sregs2); 5772 r = -EFAULT; 5773 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2))) 5774 goto out; 5775 r = 0; 5776 break; 5777 } 5778 case KVM_SET_SREGS2: { 5779 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2)); 5780 if (IS_ERR(u.sregs2)) { 5781 r = PTR_ERR(u.sregs2); 5782 u.sregs2 = NULL; 5783 goto out; 5784 } 5785 r = __set_sregs2(vcpu, u.sregs2); 5786 break; 5787 } 5788 case KVM_HAS_DEVICE_ATTR: 5789 case KVM_GET_DEVICE_ATTR: 5790 case KVM_SET_DEVICE_ATTR: 5791 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp); 5792 break; 5793 default: 5794 r = -EINVAL; 5795 } 5796 out: 5797 kfree(u.buffer); 5798 out_nofree: 5799 vcpu_put(vcpu); 5800 return r; 5801 } 5802 5803 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 5804 { 5805 return VM_FAULT_SIGBUS; 5806 } 5807 5808 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) 5809 { 5810 int ret; 5811 5812 if (addr > (unsigned int)(-3 * PAGE_SIZE)) 5813 return -EINVAL; 5814 ret = static_call(kvm_x86_set_tss_addr)(kvm, addr); 5815 return ret; 5816 } 5817 5818 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, 5819 u64 ident_addr) 5820 { 5821 return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr); 5822 } 5823 5824 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, 5825 unsigned long kvm_nr_mmu_pages) 5826 { 5827 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) 5828 return -EINVAL; 5829 5830 mutex_lock(&kvm->slots_lock); 5831 5832 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); 5833 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; 5834 5835 mutex_unlock(&kvm->slots_lock); 5836 return 0; 5837 } 5838 5839 static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) 5840 { 5841 return kvm->arch.n_max_mmu_pages; 5842 } 5843 5844 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 5845 { 5846 struct kvm_pic *pic = kvm->arch.vpic; 5847 int r; 5848 5849 r = 0; 5850 switch (chip->chip_id) { 5851 case KVM_IRQCHIP_PIC_MASTER: 5852 memcpy(&chip->chip.pic, &pic->pics[0], 5853 sizeof(struct kvm_pic_state)); 5854 break; 5855 case KVM_IRQCHIP_PIC_SLAVE: 5856 memcpy(&chip->chip.pic, &pic->pics[1], 5857 sizeof(struct kvm_pic_state)); 5858 break; 5859 case KVM_IRQCHIP_IOAPIC: 5860 kvm_get_ioapic(kvm, &chip->chip.ioapic); 5861 break; 5862 default: 5863 r = -EINVAL; 5864 break; 5865 } 5866 return r; 5867 
} 5868 5869 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 5870 { 5871 struct kvm_pic *pic = kvm->arch.vpic; 5872 int r; 5873 5874 r = 0; 5875 switch (chip->chip_id) { 5876 case KVM_IRQCHIP_PIC_MASTER: 5877 spin_lock(&pic->lock); 5878 memcpy(&pic->pics[0], &chip->chip.pic, 5879 sizeof(struct kvm_pic_state)); 5880 spin_unlock(&pic->lock); 5881 break; 5882 case KVM_IRQCHIP_PIC_SLAVE: 5883 spin_lock(&pic->lock); 5884 memcpy(&pic->pics[1], &chip->chip.pic, 5885 sizeof(struct kvm_pic_state)); 5886 spin_unlock(&pic->lock); 5887 break; 5888 case KVM_IRQCHIP_IOAPIC: 5889 kvm_set_ioapic(kvm, &chip->chip.ioapic); 5890 break; 5891 default: 5892 r = -EINVAL; 5893 break; 5894 } 5895 kvm_pic_update_irq(pic); 5896 return r; 5897 } 5898 5899 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) 5900 { 5901 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; 5902 5903 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); 5904 5905 mutex_lock(&kps->lock); 5906 memcpy(ps, &kps->channels, sizeof(*ps)); 5907 mutex_unlock(&kps->lock); 5908 return 0; 5909 } 5910 5911 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 5912 { 5913 int i; 5914 struct kvm_pit *pit = kvm->arch.vpit; 5915 5916 mutex_lock(&pit->pit_state.lock); 5917 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); 5918 for (i = 0; i < 3; i++) 5919 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); 5920 mutex_unlock(&pit->pit_state.lock); 5921 return 0; 5922 } 5923 5924 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 5925 { 5926 mutex_lock(&kvm->arch.vpit->pit_state.lock); 5927 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, 5928 sizeof(ps->channels)); 5929 ps->flags = kvm->arch.vpit->pit_state.flags; 5930 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 5931 memset(&ps->reserved, 0, sizeof(ps->reserved)); 5932 return 0; 5933 } 5934 5935 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 5936 { 5937 int start = 0; 5938 int i; 5939 u32 prev_legacy, cur_legacy; 5940 struct kvm_pit *pit = kvm->arch.vpit; 5941 5942 mutex_lock(&pit->pit_state.lock); 5943 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 5944 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; 5945 if (!prev_legacy && cur_legacy) 5946 start = 1; 5947 memcpy(&pit->pit_state.channels, &ps->channels, 5948 sizeof(pit->pit_state.channels)); 5949 pit->pit_state.flags = ps->flags; 5950 for (i = 0; i < 3; i++) 5951 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, 5952 start && i == 0); 5953 mutex_unlock(&pit->pit_state.lock); 5954 return 0; 5955 } 5956 5957 static int kvm_vm_ioctl_reinject(struct kvm *kvm, 5958 struct kvm_reinject_control *control) 5959 { 5960 struct kvm_pit *pit = kvm->arch.vpit; 5961 5962 /* pit->pit_state.lock was overloaded to prevent userspace from getting 5963 * an inconsistent state after running multiple KVM_REINJECT_CONTROL 5964 * ioctls in parallel. Use a separate lock if that ioctl isn't rare. 5965 */ 5966 mutex_lock(&pit->pit_state.lock); 5967 kvm_pit_set_reinject(pit, control->pit_reinject); 5968 mutex_unlock(&pit->pit_state.lock); 5969 5970 return 0; 5971 } 5972 5973 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) 5974 { 5975 5976 /* 5977 * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called 5978 * before reporting dirty_bitmap to userspace. KVM flushes the buffers 5979 * on all VM-Exits, thus we only need to kick running vCPUs to force a 5980 * VM-Exit. 
5981 */ 5982 struct kvm_vcpu *vcpu; 5983 unsigned long i; 5984 5985 kvm_for_each_vcpu(i, vcpu, kvm) 5986 kvm_vcpu_kick(vcpu); 5987 } 5988 5989 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, 5990 bool line_status) 5991 { 5992 if (!irqchip_in_kernel(kvm)) 5993 return -ENXIO; 5994 5995 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 5996 irq_event->irq, irq_event->level, 5997 line_status); 5998 return 0; 5999 } 6000 6001 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, 6002 struct kvm_enable_cap *cap) 6003 { 6004 int r; 6005 6006 if (cap->flags) 6007 return -EINVAL; 6008 6009 switch (cap->cap) { 6010 case KVM_CAP_DISABLE_QUIRKS2: 6011 r = -EINVAL; 6012 if (cap->args[0] & ~KVM_X86_VALID_QUIRKS) 6013 break; 6014 fallthrough; 6015 case KVM_CAP_DISABLE_QUIRKS: 6016 kvm->arch.disabled_quirks = cap->args[0]; 6017 r = 0; 6018 break; 6019 case KVM_CAP_SPLIT_IRQCHIP: { 6020 mutex_lock(&kvm->lock); 6021 r = -EINVAL; 6022 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) 6023 goto split_irqchip_unlock; 6024 r = -EEXIST; 6025 if (irqchip_in_kernel(kvm)) 6026 goto split_irqchip_unlock; 6027 if (kvm->created_vcpus) 6028 goto split_irqchip_unlock; 6029 r = kvm_setup_empty_irq_routing(kvm); 6030 if (r) 6031 goto split_irqchip_unlock; 6032 /* Pairs with irqchip_in_kernel. */ 6033 smp_wmb(); 6034 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; 6035 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; 6036 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6037 r = 0; 6038 split_irqchip_unlock: 6039 mutex_unlock(&kvm->lock); 6040 break; 6041 } 6042 case KVM_CAP_X2APIC_API: 6043 r = -EINVAL; 6044 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) 6045 break; 6046 6047 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) 6048 kvm->arch.x2apic_format = true; 6049 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) 6050 kvm->arch.x2apic_broadcast_quirk_disabled = true; 6051 6052 r = 0; 6053 break; 6054 case KVM_CAP_X86_DISABLE_EXITS: 6055 r = -EINVAL; 6056 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) 6057 break; 6058 6059 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && 6060 kvm_can_mwait_in_guest()) 6061 kvm->arch.mwait_in_guest = true; 6062 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) 6063 kvm->arch.hlt_in_guest = true; 6064 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) 6065 kvm->arch.pause_in_guest = true; 6066 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) 6067 kvm->arch.cstate_in_guest = true; 6068 r = 0; 6069 break; 6070 case KVM_CAP_MSR_PLATFORM_INFO: 6071 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; 6072 r = 0; 6073 break; 6074 case KVM_CAP_EXCEPTION_PAYLOAD: 6075 kvm->arch.exception_payload_enabled = cap->args[0]; 6076 r = 0; 6077 break; 6078 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 6079 kvm->arch.triple_fault_event = cap->args[0]; 6080 r = 0; 6081 break; 6082 case KVM_CAP_X86_USER_SPACE_MSR: 6083 kvm->arch.user_space_msr_mask = cap->args[0]; 6084 r = 0; 6085 break; 6086 case KVM_CAP_X86_BUS_LOCK_EXIT: 6087 r = -EINVAL; 6088 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) 6089 break; 6090 6091 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && 6092 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) 6093 break; 6094 6095 if (kvm_caps.has_bus_lock_exit && 6096 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) 6097 kvm->arch.bus_lock_detection_enabled = true; 6098 r = 0; 6099 break; 6100 #ifdef CONFIG_X86_SGX_KVM 6101 case KVM_CAP_SGX_ATTRIBUTE: { 6102 unsigned long allowed_attributes = 0; 6103 6104 r = sgx_set_attribute(&allowed_attributes, 
cap->args[0]); 6105 if (r) 6106 break; 6107 6108 /* KVM only supports the PROVISIONKEY privileged attribute. */ 6109 if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) && 6110 !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY)) 6111 kvm->arch.sgx_provisioning_allowed = true; 6112 else 6113 r = -EINVAL; 6114 break; 6115 } 6116 #endif 6117 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 6118 r = -EINVAL; 6119 if (!kvm_x86_ops.vm_copy_enc_context_from) 6120 break; 6121 6122 r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]); 6123 break; 6124 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 6125 r = -EINVAL; 6126 if (!kvm_x86_ops.vm_move_enc_context_from) 6127 break; 6128 6129 r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]); 6130 break; 6131 case KVM_CAP_EXIT_HYPERCALL: 6132 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) { 6133 r = -EINVAL; 6134 break; 6135 } 6136 kvm->arch.hypercall_exit_enabled = cap->args[0]; 6137 r = 0; 6138 break; 6139 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 6140 r = -EINVAL; 6141 if (cap->args[0] & ~1) 6142 break; 6143 kvm->arch.exit_on_emulation_error = cap->args[0]; 6144 r = 0; 6145 break; 6146 case KVM_CAP_PMU_CAPABILITY: 6147 r = -EINVAL; 6148 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK)) 6149 break; 6150 6151 mutex_lock(&kvm->lock); 6152 if (!kvm->created_vcpus) { 6153 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE); 6154 r = 0; 6155 } 6156 mutex_unlock(&kvm->lock); 6157 break; 6158 case KVM_CAP_MAX_VCPU_ID: 6159 r = -EINVAL; 6160 if (cap->args[0] > KVM_MAX_VCPU_IDS) 6161 break; 6162 6163 mutex_lock(&kvm->lock); 6164 if (kvm->arch.max_vcpu_ids == cap->args[0]) { 6165 r = 0; 6166 } else if (!kvm->arch.max_vcpu_ids) { 6167 kvm->arch.max_vcpu_ids = cap->args[0]; 6168 r = 0; 6169 } 6170 mutex_unlock(&kvm->lock); 6171 break; 6172 case KVM_CAP_X86_NOTIFY_VMEXIT: 6173 r = -EINVAL; 6174 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS) 6175 break; 6176 if (!kvm_caps.has_notify_vmexit) 6177 break; 6178 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED)) 6179 break; 6180 mutex_lock(&kvm->lock); 6181 if (!kvm->created_vcpus) { 6182 kvm->arch.notify_window = cap->args[0] >> 32; 6183 kvm->arch.notify_vmexit_flags = (u32)cap->args[0]; 6184 r = 0; 6185 } 6186 mutex_unlock(&kvm->lock); 6187 break; 6188 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 6189 r = -EINVAL; 6190 6191 /* 6192 * Since the risk of disabling NX hugepages is a guest crashing 6193 * the system, ensure the userspace process has permission to 6194 * reboot the system. 6195 * 6196 * Note that unlike the reboot() syscall, the process must have 6197 * this capability in the root namespace because exposing 6198 * /dev/kvm into a container does not limit the scope of the 6199 * iTLB multihit bug to that container. In other words, 6200 * this must use capable(), not ns_capable(). 
6201 */ 6202 if (!capable(CAP_SYS_BOOT)) { 6203 r = -EPERM; 6204 break; 6205 } 6206 6207 if (cap->args[0]) 6208 break; 6209 6210 mutex_lock(&kvm->lock); 6211 if (!kvm->created_vcpus) { 6212 kvm->arch.disable_nx_huge_pages = true; 6213 r = 0; 6214 } 6215 mutex_unlock(&kvm->lock); 6216 break; 6217 default: 6218 r = -EINVAL; 6219 break; 6220 } 6221 return r; 6222 } 6223 6224 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow) 6225 { 6226 struct kvm_x86_msr_filter *msr_filter; 6227 6228 msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT); 6229 if (!msr_filter) 6230 return NULL; 6231 6232 msr_filter->default_allow = default_allow; 6233 return msr_filter; 6234 } 6235 6236 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter) 6237 { 6238 u32 i; 6239 6240 if (!msr_filter) 6241 return; 6242 6243 for (i = 0; i < msr_filter->count; i++) 6244 kfree(msr_filter->ranges[i].bitmap); 6245 6246 kfree(msr_filter); 6247 } 6248 6249 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, 6250 struct kvm_msr_filter_range *user_range) 6251 { 6252 unsigned long *bitmap = NULL; 6253 size_t bitmap_size; 6254 6255 if (!user_range->nmsrs) 6256 return 0; 6257 6258 if (user_range->flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)) 6259 return -EINVAL; 6260 6261 if (!user_range->flags) 6262 return -EINVAL; 6263 6264 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); 6265 if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE) 6266 return -EINVAL; 6267 6268 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); 6269 if (IS_ERR(bitmap)) 6270 return PTR_ERR(bitmap); 6271 6272 msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) { 6273 .flags = user_range->flags, 6274 .base = user_range->base, 6275 .nmsrs = user_range->nmsrs, 6276 .bitmap = bitmap, 6277 }; 6278 6279 msr_filter->count++; 6280 return 0; 6281 } 6282 6283 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) 6284 { 6285 struct kvm_msr_filter __user *user_msr_filter = argp; 6286 struct kvm_x86_msr_filter *new_filter, *old_filter; 6287 struct kvm_msr_filter filter; 6288 bool default_allow; 6289 bool empty = true; 6290 int r = 0; 6291 u32 i; 6292 6293 if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) 6294 return -EFAULT; 6295 6296 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) 6297 empty &= !filter.ranges[i].nmsrs; 6298 6299 default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY); 6300 if (empty && !default_allow) 6301 return -EINVAL; 6302 6303 new_filter = kvm_alloc_msr_filter(default_allow); 6304 if (!new_filter) 6305 return -ENOMEM; 6306 6307 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { 6308 r = kvm_add_msr_filter(new_filter, &filter.ranges[i]); 6309 if (r) { 6310 kvm_free_msr_filter(new_filter); 6311 return r; 6312 } 6313 } 6314 6315 mutex_lock(&kvm->lock); 6316 6317 /* The per-VM filter is protected by kvm->lock... 
*/ 6318 old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); 6319 6320 rcu_assign_pointer(kvm->arch.msr_filter, new_filter); 6321 synchronize_srcu(&kvm->srcu); 6322 6323 kvm_free_msr_filter(old_filter); 6324 6325 kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED); 6326 mutex_unlock(&kvm->lock); 6327 6328 return 0; 6329 } 6330 6331 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 6332 static int kvm_arch_suspend_notifier(struct kvm *kvm) 6333 { 6334 struct kvm_vcpu *vcpu; 6335 unsigned long i; 6336 int ret = 0; 6337 6338 mutex_lock(&kvm->lock); 6339 kvm_for_each_vcpu(i, vcpu, kvm) { 6340 if (!vcpu->arch.pv_time.active) 6341 continue; 6342 6343 ret = kvm_set_guest_paused(vcpu); 6344 if (ret) { 6345 kvm_err("Failed to pause guest VCPU%d: %d\n", 6346 vcpu->vcpu_id, ret); 6347 break; 6348 } 6349 } 6350 mutex_unlock(&kvm->lock); 6351 6352 return ret ? NOTIFY_BAD : NOTIFY_DONE; 6353 } 6354 6355 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state) 6356 { 6357 switch (state) { 6358 case PM_HIBERNATION_PREPARE: 6359 case PM_SUSPEND_PREPARE: 6360 return kvm_arch_suspend_notifier(kvm); 6361 } 6362 6363 return NOTIFY_DONE; 6364 } 6365 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 6366 6367 static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp) 6368 { 6369 struct kvm_clock_data data = { 0 }; 6370 6371 get_kvmclock(kvm, &data); 6372 if (copy_to_user(argp, &data, sizeof(data))) 6373 return -EFAULT; 6374 6375 return 0; 6376 } 6377 6378 static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp) 6379 { 6380 struct kvm_arch *ka = &kvm->arch; 6381 struct kvm_clock_data data; 6382 u64 now_raw_ns; 6383 6384 if (copy_from_user(&data, argp, sizeof(data))) 6385 return -EFAULT; 6386 6387 /* 6388 * Only KVM_CLOCK_REALTIME is used, but allow passing the 6389 * result of KVM_GET_CLOCK back to KVM_SET_CLOCK. 6390 */ 6391 if (data.flags & ~KVM_CLOCK_VALID_FLAGS) 6392 return -EINVAL; 6393 6394 kvm_hv_request_tsc_page_update(kvm); 6395 kvm_start_pvclock_update(kvm); 6396 pvclock_update_vm_gtod_copy(kvm); 6397 6398 /* 6399 * This pairs with kvm_guest_time_update(): when masterclock is 6400 * in use, we use master_kernel_ns + kvmclock_offset to set 6401 * unsigned 'system_time' so if we use get_kvmclock_ns() (which 6402 * is slightly ahead) here we risk going negative on unsigned 6403 * 'system_time' when 'data.clock' is very small. 6404 */ 6405 if (data.flags & KVM_CLOCK_REALTIME) { 6406 u64 now_real_ns = ktime_get_real_ns(); 6407 6408 /* 6409 * Avoid stepping the kvmclock backwards. 6410 */ 6411 if (now_real_ns > data.realtime) 6412 data.clock += now_real_ns - data.realtime; 6413 } 6414 6415 if (ka->use_master_clock) 6416 now_raw_ns = ka->master_kernel_ns; 6417 else 6418 now_raw_ns = get_kvmclock_base_ns(); 6419 ka->kvmclock_offset = data.clock - now_raw_ns; 6420 kvm_end_pvclock_update(kvm); 6421 return 0; 6422 } 6423 6424 long kvm_arch_vm_ioctl(struct file *filp, 6425 unsigned int ioctl, unsigned long arg) 6426 { 6427 struct kvm *kvm = filp->private_data; 6428 void __user *argp = (void __user *)arg; 6429 int r = -ENOTTY; 6430 /* 6431 * This union makes it completely explicit to gcc-3.x 6432 * that these two variables' stack usage should be 6433 * combined, not added together. 
6434 */ 6435 union { 6436 struct kvm_pit_state ps; 6437 struct kvm_pit_state2 ps2; 6438 struct kvm_pit_config pit_config; 6439 } u; 6440 6441 switch (ioctl) { 6442 case KVM_SET_TSS_ADDR: 6443 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); 6444 break; 6445 case KVM_SET_IDENTITY_MAP_ADDR: { 6446 u64 ident_addr; 6447 6448 mutex_lock(&kvm->lock); 6449 r = -EINVAL; 6450 if (kvm->created_vcpus) 6451 goto set_identity_unlock; 6452 r = -EFAULT; 6453 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr))) 6454 goto set_identity_unlock; 6455 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 6456 set_identity_unlock: 6457 mutex_unlock(&kvm->lock); 6458 break; 6459 } 6460 case KVM_SET_NR_MMU_PAGES: 6461 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); 6462 break; 6463 case KVM_GET_NR_MMU_PAGES: 6464 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); 6465 break; 6466 case KVM_CREATE_IRQCHIP: { 6467 mutex_lock(&kvm->lock); 6468 6469 r = -EEXIST; 6470 if (irqchip_in_kernel(kvm)) 6471 goto create_irqchip_unlock; 6472 6473 r = -EINVAL; 6474 if (kvm->created_vcpus) 6475 goto create_irqchip_unlock; 6476 6477 r = kvm_pic_init(kvm); 6478 if (r) 6479 goto create_irqchip_unlock; 6480 6481 r = kvm_ioapic_init(kvm); 6482 if (r) { 6483 kvm_pic_destroy(kvm); 6484 goto create_irqchip_unlock; 6485 } 6486 6487 r = kvm_setup_default_irq_routing(kvm); 6488 if (r) { 6489 kvm_ioapic_destroy(kvm); 6490 kvm_pic_destroy(kvm); 6491 goto create_irqchip_unlock; 6492 } 6493 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */ 6494 smp_wmb(); 6495 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; 6496 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6497 create_irqchip_unlock: 6498 mutex_unlock(&kvm->lock); 6499 break; 6500 } 6501 case KVM_CREATE_PIT: 6502 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; 6503 goto create_pit; 6504 case KVM_CREATE_PIT2: 6505 r = -EFAULT; 6506 if (copy_from_user(&u.pit_config, argp, 6507 sizeof(struct kvm_pit_config))) 6508 goto out; 6509 create_pit: 6510 mutex_lock(&kvm->lock); 6511 r = -EEXIST; 6512 if (kvm->arch.vpit) 6513 goto create_pit_unlock; 6514 r = -ENOMEM; 6515 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); 6516 if (kvm->arch.vpit) 6517 r = 0; 6518 create_pit_unlock: 6519 mutex_unlock(&kvm->lock); 6520 break; 6521 case KVM_GET_IRQCHIP: { 6522 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6523 struct kvm_irqchip *chip; 6524 6525 chip = memdup_user(argp, sizeof(*chip)); 6526 if (IS_ERR(chip)) { 6527 r = PTR_ERR(chip); 6528 goto out; 6529 } 6530 6531 r = -ENXIO; 6532 if (!irqchip_kernel(kvm)) 6533 goto get_irqchip_out; 6534 r = kvm_vm_ioctl_get_irqchip(kvm, chip); 6535 if (r) 6536 goto get_irqchip_out; 6537 r = -EFAULT; 6538 if (copy_to_user(argp, chip, sizeof(*chip))) 6539 goto get_irqchip_out; 6540 r = 0; 6541 get_irqchip_out: 6542 kfree(chip); 6543 break; 6544 } 6545 case KVM_SET_IRQCHIP: { 6546 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6547 struct kvm_irqchip *chip; 6548 6549 chip = memdup_user(argp, sizeof(*chip)); 6550 if (IS_ERR(chip)) { 6551 r = PTR_ERR(chip); 6552 goto out; 6553 } 6554 6555 r = -ENXIO; 6556 if (!irqchip_kernel(kvm)) 6557 goto set_irqchip_out; 6558 r = kvm_vm_ioctl_set_irqchip(kvm, chip); 6559 set_irqchip_out: 6560 kfree(chip); 6561 break; 6562 } 6563 case KVM_GET_PIT: { 6564 r = -EFAULT; 6565 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) 6566 goto out; 6567 r = -ENXIO; 6568 if (!kvm->arch.vpit) 6569 goto out; 6570 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); 6571 if (r) 6572 goto out; 6573 r = -EFAULT; 6574 if (copy_to_user(argp, &u.ps, 
sizeof(struct kvm_pit_state))) 6575 goto out; 6576 r = 0; 6577 break; 6578 } 6579 case KVM_SET_PIT: { 6580 r = -EFAULT; 6581 if (copy_from_user(&u.ps, argp, sizeof(u.ps))) 6582 goto out; 6583 mutex_lock(&kvm->lock); 6584 r = -ENXIO; 6585 if (!kvm->arch.vpit) 6586 goto set_pit_out; 6587 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); 6588 set_pit_out: 6589 mutex_unlock(&kvm->lock); 6590 break; 6591 } 6592 case KVM_GET_PIT2: { 6593 r = -ENXIO; 6594 if (!kvm->arch.vpit) 6595 goto out; 6596 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); 6597 if (r) 6598 goto out; 6599 r = -EFAULT; 6600 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) 6601 goto out; 6602 r = 0; 6603 break; 6604 } 6605 case KVM_SET_PIT2: { 6606 r = -EFAULT; 6607 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) 6608 goto out; 6609 mutex_lock(&kvm->lock); 6610 r = -ENXIO; 6611 if (!kvm->arch.vpit) 6612 goto set_pit2_out; 6613 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); 6614 set_pit2_out: 6615 mutex_unlock(&kvm->lock); 6616 break; 6617 } 6618 case KVM_REINJECT_CONTROL: { 6619 struct kvm_reinject_control control; 6620 r = -EFAULT; 6621 if (copy_from_user(&control, argp, sizeof(control))) 6622 goto out; 6623 r = -ENXIO; 6624 if (!kvm->arch.vpit) 6625 goto out; 6626 r = kvm_vm_ioctl_reinject(kvm, &control); 6627 break; 6628 } 6629 case KVM_SET_BOOT_CPU_ID: 6630 r = 0; 6631 mutex_lock(&kvm->lock); 6632 if (kvm->created_vcpus) 6633 r = -EBUSY; 6634 else 6635 kvm->arch.bsp_vcpu_id = arg; 6636 mutex_unlock(&kvm->lock); 6637 break; 6638 #ifdef CONFIG_KVM_XEN 6639 case KVM_XEN_HVM_CONFIG: { 6640 struct kvm_xen_hvm_config xhc; 6641 r = -EFAULT; 6642 if (copy_from_user(&xhc, argp, sizeof(xhc))) 6643 goto out; 6644 r = kvm_xen_hvm_config(kvm, &xhc); 6645 break; 6646 } 6647 case KVM_XEN_HVM_GET_ATTR: { 6648 struct kvm_xen_hvm_attr xha; 6649 6650 r = -EFAULT; 6651 if (copy_from_user(&xha, argp, sizeof(xha))) 6652 goto out; 6653 r = kvm_xen_hvm_get_attr(kvm, &xha); 6654 if (!r && copy_to_user(argp, &xha, sizeof(xha))) 6655 r = -EFAULT; 6656 break; 6657 } 6658 case KVM_XEN_HVM_SET_ATTR: { 6659 struct kvm_xen_hvm_attr xha; 6660 6661 r = -EFAULT; 6662 if (copy_from_user(&xha, argp, sizeof(xha))) 6663 goto out; 6664 r = kvm_xen_hvm_set_attr(kvm, &xha); 6665 break; 6666 } 6667 case KVM_XEN_HVM_EVTCHN_SEND: { 6668 struct kvm_irq_routing_xen_evtchn uxe; 6669 6670 r = -EFAULT; 6671 if (copy_from_user(&uxe, argp, sizeof(uxe))) 6672 goto out; 6673 r = kvm_xen_hvm_evtchn_send(kvm, &uxe); 6674 break; 6675 } 6676 #endif 6677 case KVM_SET_CLOCK: 6678 r = kvm_vm_ioctl_set_clock(kvm, argp); 6679 break; 6680 case KVM_GET_CLOCK: 6681 r = kvm_vm_ioctl_get_clock(kvm, argp); 6682 break; 6683 case KVM_SET_TSC_KHZ: { 6684 u32 user_tsc_khz; 6685 6686 r = -EINVAL; 6687 user_tsc_khz = (u32)arg; 6688 6689 if (kvm_caps.has_tsc_control && 6690 user_tsc_khz >= kvm_caps.max_guest_tsc_khz) 6691 goto out; 6692 6693 if (user_tsc_khz == 0) 6694 user_tsc_khz = tsc_khz; 6695 6696 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz); 6697 r = 0; 6698 6699 goto out; 6700 } 6701 case KVM_GET_TSC_KHZ: { 6702 r = READ_ONCE(kvm->arch.default_tsc_khz); 6703 goto out; 6704 } 6705 case KVM_MEMORY_ENCRYPT_OP: { 6706 r = -ENOTTY; 6707 if (!kvm_x86_ops.mem_enc_ioctl) 6708 goto out; 6709 6710 r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp); 6711 break; 6712 } 6713 case KVM_MEMORY_ENCRYPT_REG_REGION: { 6714 struct kvm_enc_region region; 6715 6716 r = -EFAULT; 6717 if (copy_from_user(&region, argp, sizeof(region))) 6718 goto out; 6719 6720 r = -ENOTTY; 6721 if (!kvm_x86_ops.mem_enc_register_region) 6722 goto out; 6723
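/* The vendor implementation behind this hook (SEV, for example) is expected to pin and track the pages in the region so that encrypted guest memory stays resident while it is registered. */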
6724 r = static_call(kvm_x86_mem_enc_register_region)(kvm, &region); 6725 break; 6726 } 6727 case KVM_MEMORY_ENCRYPT_UNREG_REGION: { 6728 struct kvm_enc_region region; 6729 6730 r = -EFAULT; 6731 if (copy_from_user(&region, argp, sizeof(region))) 6732 goto out; 6733 6734 r = -ENOTTY; 6735 if (!kvm_x86_ops.mem_enc_unregister_region) 6736 goto out; 6737 6738 r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region); 6739 break; 6740 } 6741 case KVM_HYPERV_EVENTFD: { 6742 struct kvm_hyperv_eventfd hvevfd; 6743 6744 r = -EFAULT; 6745 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd))) 6746 goto out; 6747 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd); 6748 break; 6749 } 6750 case KVM_SET_PMU_EVENT_FILTER: 6751 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); 6752 break; 6753 case KVM_X86_SET_MSR_FILTER: 6754 r = kvm_vm_ioctl_set_msr_filter(kvm, argp); 6755 break; 6756 default: 6757 r = -ENOTTY; 6758 } 6759 out: 6760 return r; 6761 } 6762 6763 static void kvm_init_msr_list(void) 6764 { 6765 u32 dummy[2]; 6766 unsigned i; 6767 6768 BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3, 6769 "Please update the fixed PMCs in msrs_to_save_all[]"); 6770 6771 num_msrs_to_save = 0; 6772 num_emulated_msrs = 0; 6773 num_msr_based_features = 0; 6774 6775 for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) { 6776 if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0) 6777 continue; 6778 6779 /* 6780 * Even MSRs that are valid in the host may not be exposed 6781 * to the guests in some cases. 6782 */ 6783 switch (msrs_to_save_all[i]) { 6784 case MSR_IA32_BNDCFGS: 6785 if (!kvm_mpx_supported()) 6786 continue; 6787 break; 6788 case MSR_TSC_AUX: 6789 if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) && 6790 !kvm_cpu_cap_has(X86_FEATURE_RDPID)) 6791 continue; 6792 break; 6793 case MSR_IA32_UMWAIT_CONTROL: 6794 if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) 6795 continue; 6796 break; 6797 case MSR_IA32_RTIT_CTL: 6798 case MSR_IA32_RTIT_STATUS: 6799 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) 6800 continue; 6801 break; 6802 case MSR_IA32_RTIT_CR3_MATCH: 6803 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 6804 !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering)) 6805 continue; 6806 break; 6807 case MSR_IA32_RTIT_OUTPUT_BASE: 6808 case MSR_IA32_RTIT_OUTPUT_MASK: 6809 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 6810 (!intel_pt_validate_hw_cap(PT_CAP_topa_output) && 6811 !intel_pt_validate_hw_cap(PT_CAP_single_range_output))) 6812 continue; 6813 break; 6814 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 6815 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 6816 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >= 6817 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2) 6818 continue; 6819 break; 6820 case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17: 6821 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= 6822 min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp)) 6823 continue; 6824 break; 6825 case MSR_ARCH_PERFMON_EVENTSEL0 ...
MSR_ARCH_PERFMON_EVENTSEL0 + 17: 6826 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= 6827 min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp)) 6828 continue; 6829 break; 6830 case MSR_IA32_XFD: 6831 case MSR_IA32_XFD_ERR: 6832 if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) 6833 continue; 6834 break; 6835 default: 6836 break; 6837 } 6838 6839 msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i]; 6840 } 6841 6842 for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { 6843 if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i])) 6844 continue; 6845 6846 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; 6847 } 6848 6849 for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) { 6850 struct kvm_msr_entry msr; 6851 6852 msr.index = msr_based_features_all[i]; 6853 if (kvm_get_msr_feature(&msr)) 6854 continue; 6855 6856 msr_based_features[num_msr_based_features++] = msr_based_features_all[i]; 6857 } 6858 } 6859 6860 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, 6861 const void *v) 6862 { 6863 int handled = 0; 6864 int n; 6865 6866 do { 6867 n = min(len, 8); 6868 if (!(lapic_in_kernel(vcpu) && 6869 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) 6870 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) 6871 break; 6872 handled += n; 6873 addr += n; 6874 len -= n; 6875 v += n; 6876 } while (len); 6877 6878 return handled; 6879 } 6880 6881 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) 6882 { 6883 int handled = 0; 6884 int n; 6885 6886 do { 6887 n = min(len, 8); 6888 if (!(lapic_in_kernel(vcpu) && 6889 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, 6890 addr, n, v)) 6891 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) 6892 break; 6893 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); 6894 handled += n; 6895 addr += n; 6896 len -= n; 6897 v += n; 6898 } while (len); 6899 6900 return handled; 6901 } 6902 6903 static void kvm_set_segment(struct kvm_vcpu *vcpu, 6904 struct kvm_segment *var, int seg) 6905 { 6906 static_call(kvm_x86_set_segment)(vcpu, var, seg); 6907 } 6908 6909 void kvm_get_segment(struct kvm_vcpu *vcpu, 6910 struct kvm_segment *var, int seg) 6911 { 6912 static_call(kvm_x86_get_segment)(vcpu, var, seg); 6913 } 6914 6915 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, 6916 struct x86_exception *exception) 6917 { 6918 struct kvm_mmu *mmu = vcpu->arch.mmu; 6919 gpa_t t_gpa; 6920 6921 BUG_ON(!mmu_is_nested(vcpu)); 6922 6923 /* NPT walks are always user-walks */ 6924 access |= PFERR_USER_MASK; 6925 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception); 6926 6927 return t_gpa; 6928 } 6929 6930 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 6931 struct x86_exception *exception) 6932 { 6933 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 6934 6935 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 6936 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 6937 } 6938 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); 6939 6940 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, 6941 struct x86_exception *exception) 6942 { 6943 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 6944 6945 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? 
PFERR_USER_MASK : 0; 6946 access |= PFERR_FETCH_MASK; 6947 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 6948 } 6949 6950 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 6951 struct x86_exception *exception) 6952 { 6953 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 6954 6955 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 6956 access |= PFERR_WRITE_MASK; 6957 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 6958 } 6959 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); 6960 6961 /* uses this to access any guest's mapped memory without checking CPL */ 6962 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, 6963 struct x86_exception *exception) 6964 { 6965 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 6966 6967 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); 6968 } 6969 6970 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 6971 struct kvm_vcpu *vcpu, u64 access, 6972 struct x86_exception *exception) 6973 { 6974 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 6975 void *data = val; 6976 int r = X86EMUL_CONTINUE; 6977 6978 while (bytes) { 6979 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 6980 unsigned offset = addr & (PAGE_SIZE-1); 6981 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 6982 int ret; 6983 6984 if (gpa == UNMAPPED_GVA) 6985 return X86EMUL_PROPAGATE_FAULT; 6986 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, 6987 offset, toread); 6988 if (ret < 0) { 6989 r = X86EMUL_IO_NEEDED; 6990 goto out; 6991 } 6992 6993 bytes -= toread; 6994 data += toread; 6995 addr += toread; 6996 } 6997 out: 6998 return r; 6999 } 7000 7001 /* used for instruction fetching */ 7002 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, 7003 gva_t addr, void *val, unsigned int bytes, 7004 struct x86_exception *exception) 7005 { 7006 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7007 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7008 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7009 unsigned offset; 7010 int ret; 7011 7012 /* Inline kvm_read_guest_virt_helper for speed. */ 7013 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK, 7014 exception); 7015 if (unlikely(gpa == UNMAPPED_GVA)) 7016 return X86EMUL_PROPAGATE_FAULT; 7017 7018 offset = addr & (PAGE_SIZE-1); 7019 if (WARN_ON(offset + bytes > PAGE_SIZE)) 7020 bytes = (unsigned)PAGE_SIZE - offset; 7021 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, 7022 offset, bytes); 7023 if (unlikely(ret < 0)) 7024 return X86EMUL_IO_NEEDED; 7025 7026 return X86EMUL_CONTINUE; 7027 } 7028 7029 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, 7030 gva_t addr, void *val, unsigned int bytes, 7031 struct x86_exception *exception) 7032 { 7033 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7034 7035 /* 7036 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED 7037 * is returned, but our callers are not ready for that and they blindly 7038 * call kvm_inject_page_fault. Ensure that they at least do not leak 7039 * uninitialized kernel stack memory into cr2 and error code. 
7040 */ 7041 memset(exception, 0, sizeof(*exception)); 7042 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 7043 exception); 7044 } 7045 EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 7046 7047 static int emulator_read_std(struct x86_emulate_ctxt *ctxt, 7048 gva_t addr, void *val, unsigned int bytes, 7049 struct x86_exception *exception, bool system) 7050 { 7051 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7052 u64 access = 0; 7053 7054 if (system) 7055 access |= PFERR_IMPLICIT_ACCESS; 7056 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7057 access |= PFERR_USER_MASK; 7058 7059 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); 7060 } 7061 7062 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, 7063 unsigned long addr, void *val, unsigned int bytes) 7064 { 7065 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7066 int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes); 7067 7068 return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE; 7069 } 7070 7071 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 7072 struct kvm_vcpu *vcpu, u64 access, 7073 struct x86_exception *exception) 7074 { 7075 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7076 void *data = val; 7077 int r = X86EMUL_CONTINUE; 7078 7079 while (bytes) { 7080 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 7081 unsigned offset = addr & (PAGE_SIZE-1); 7082 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 7083 int ret; 7084 7085 if (gpa == UNMAPPED_GVA) 7086 return X86EMUL_PROPAGATE_FAULT; 7087 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); 7088 if (ret < 0) { 7089 r = X86EMUL_IO_NEEDED; 7090 goto out; 7091 } 7092 7093 bytes -= towrite; 7094 data += towrite; 7095 addr += towrite; 7096 } 7097 out: 7098 return r; 7099 } 7100 7101 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, 7102 unsigned int bytes, struct x86_exception *exception, 7103 bool system) 7104 { 7105 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7106 u64 access = PFERR_WRITE_MASK; 7107 7108 if (system) 7109 access |= PFERR_IMPLICIT_ACCESS; 7110 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7111 access |= PFERR_USER_MASK; 7112 7113 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7114 access, exception); 7115 } 7116 7117 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, 7118 unsigned int bytes, struct x86_exception *exception) 7119 { 7120 /* kvm_write_guest_virt_system can pull in tons of pages. 
*/ 7121 vcpu->arch.l1tf_flush_l1d = true; 7122 7123 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7124 PFERR_WRITE_MASK, exception); 7125 } 7126 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 7127 7128 static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type, 7129 void *insn, int insn_len) 7130 { 7131 return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type, 7132 insn, insn_len); 7133 } 7134 7135 int handle_ud(struct kvm_vcpu *vcpu) 7136 { 7137 static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX }; 7138 int emul_type = EMULTYPE_TRAP_UD; 7139 char sig[5]; /* ud2; .ascii "kvm" */ 7140 struct x86_exception e; 7141 7142 if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0))) 7143 return 1; 7144 7145 if (force_emulation_prefix && 7146 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), 7147 sig, sizeof(sig), &e) == 0 && 7148 memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) { 7149 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); 7150 emul_type = EMULTYPE_TRAP_UD_FORCED; 7151 } 7152 7153 return kvm_emulate_instruction(vcpu, emul_type); 7154 } 7155 EXPORT_SYMBOL_GPL(handle_ud); 7156 7157 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7158 gpa_t gpa, bool write) 7159 { 7160 /* For APIC access vmexit */ 7161 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7162 return 1; 7163 7164 if (vcpu_match_mmio_gpa(vcpu, gpa)) { 7165 trace_vcpu_match_mmio(gva, gpa, write, true); 7166 return 1; 7167 } 7168 7169 return 0; 7170 } 7171 7172 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7173 gpa_t *gpa, struct x86_exception *exception, 7174 bool write) 7175 { 7176 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7177 u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) 7178 | (write ? PFERR_WRITE_MASK : 0); 7179 7180 /* 7181 * currently PKRU is only applied to ept enabled guest so 7182 * there is no pkey in EPT page table for L1 guest or EPT 7183 * shadow page table for L2 guest. 
7184 */ 7185 if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) || 7186 !permission_fault(vcpu, vcpu->arch.walk_mmu, 7187 vcpu->arch.mmio_access, 0, access))) { 7188 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | 7189 (gva & (PAGE_SIZE - 1)); 7190 trace_vcpu_match_mmio(gva, *gpa, write, false); 7191 return 1; 7192 } 7193 7194 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7195 7196 if (*gpa == UNMAPPED_GVA) 7197 return -1; 7198 7199 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); 7200 } 7201 7202 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 7203 const void *val, int bytes) 7204 { 7205 int ret; 7206 7207 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); 7208 if (ret < 0) 7209 return 0; 7210 kvm_page_track_write(vcpu, gpa, val, bytes); 7211 return 1; 7212 } 7213 7214 struct read_write_emulator_ops { 7215 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, 7216 int bytes); 7217 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, 7218 void *val, int bytes); 7219 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 7220 int bytes, void *val); 7221 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 7222 void *val, int bytes); 7223 bool write; 7224 }; 7225 7226 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) 7227 { 7228 if (vcpu->mmio_read_completed) { 7229 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 7230 vcpu->mmio_fragments[0].gpa, val); 7231 vcpu->mmio_read_completed = 0; 7232 return 1; 7233 } 7234 7235 return 0; 7236 } 7237 7238 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 7239 void *val, int bytes) 7240 { 7241 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); 7242 } 7243 7244 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 7245 void *val, int bytes) 7246 { 7247 return emulator_write_phys(vcpu, gpa, val, bytes); 7248 } 7249 7250 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) 7251 { 7252 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val); 7253 return vcpu_mmio_write(vcpu, gpa, bytes, val); 7254 } 7255 7256 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 7257 void *val, int bytes) 7258 { 7259 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL); 7260 return X86EMUL_IO_NEEDED; 7261 } 7262 7263 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 7264 void *val, int bytes) 7265 { 7266 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 7267 7268 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 7269 return X86EMUL_CONTINUE; 7270 } 7271 7272 static const struct read_write_emulator_ops read_emultor = { 7273 .read_write_prepare = read_prepare, 7274 .read_write_emulate = read_emulate, 7275 .read_write_mmio = vcpu_mmio_read, 7276 .read_write_exit_mmio = read_exit_mmio, 7277 }; 7278 7279 static const struct read_write_emulator_ops write_emultor = { 7280 .read_write_emulate = write_emulate, 7281 .read_write_mmio = write_mmio, 7282 .read_write_exit_mmio = write_exit_mmio, 7283 .write = true, 7284 }; 7285 7286 static int emulator_read_write_onepage(unsigned long addr, void *val, 7287 unsigned int bytes, 7288 struct x86_exception *exception, 7289 struct kvm_vcpu *vcpu, 7290 const struct read_write_emulator_ops *ops) 7291 { 7292 gpa_t gpa; 7293 int handled, ret; 7294 bool write = ops->write; 7295 struct kvm_mmio_fragment *frag; 7296 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 7297 7298 /* 7299 * If the exit was due to a NPF we may already have a GPA. 
7300 * If the GPA is present, use it to avoid the GVA to GPA table walk. 7301 * Note, this cannot be used on string operations since a string 7302 * operation using rep will only have the initial GPA from when the 7303 * NPF occurred. 7304 */ 7305 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && 7306 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { 7307 gpa = ctxt->gpa_val; 7308 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); 7309 } else { 7310 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); 7311 if (ret < 0) 7312 return X86EMUL_PROPAGATE_FAULT; 7313 } 7314 7315 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) 7316 return X86EMUL_CONTINUE; 7317 7318 /* 7319 * Is this MMIO handled locally? 7320 */ 7321 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); 7322 if (handled == bytes) 7323 return X86EMUL_CONTINUE; 7324 7325 gpa += handled; 7326 bytes -= handled; 7327 val += handled; 7328 7329 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); 7330 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 7331 frag->gpa = gpa; 7332 frag->data = val; 7333 frag->len = bytes; 7334 return X86EMUL_CONTINUE; 7335 } 7336 7337 static int emulator_read_write(struct x86_emulate_ctxt *ctxt, 7338 unsigned long addr, 7339 void *val, unsigned int bytes, 7340 struct x86_exception *exception, 7341 const struct read_write_emulator_ops *ops) 7342 { 7343 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7344 gpa_t gpa; 7345 int rc; 7346 7347 if (ops->read_write_prepare && 7348 ops->read_write_prepare(vcpu, val, bytes)) 7349 return X86EMUL_CONTINUE; 7350 7351 vcpu->mmio_nr_fragments = 0; 7352 7353 /* Crossing a page boundary? */ 7354 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 7355 int now; 7356 7357 now = -addr & ~PAGE_MASK; 7358 rc = emulator_read_write_onepage(addr, val, now, exception, 7359 vcpu, ops); 7360 7361 if (rc != X86EMUL_CONTINUE) 7362 return rc; 7363 addr += now; 7364 if (ctxt->mode != X86EMUL_MODE_PROT64) 7365 addr = (u32)addr; 7366 val += now; 7367 bytes -= now; 7368 } 7369 7370 rc = emulator_read_write_onepage(addr, val, bytes, exception, 7371 vcpu, ops); 7372 if (rc != X86EMUL_CONTINUE) 7373 return rc; 7374 7375 if (!vcpu->mmio_nr_fragments) 7376 return rc; 7377 7378 gpa = vcpu->mmio_fragments[0].gpa; 7379 7380 vcpu->mmio_needed = 1; 7381 vcpu->mmio_cur_fragment = 0; 7382 7383 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); 7384 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 7385 vcpu->run->exit_reason = KVM_EXIT_MMIO; 7386 vcpu->run->mmio.phys_addr = gpa; 7387 7388 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 7389 } 7390 7391 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 7392 unsigned long addr, 7393 void *val, 7394 unsigned int bytes, 7395 struct x86_exception *exception) 7396 { 7397 return emulator_read_write(ctxt, addr, val, bytes, 7398 exception, &read_emultor); 7399 } 7400 7401 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, 7402 unsigned long addr, 7403 const void *val, 7404 unsigned int bytes, 7405 struct x86_exception *exception) 7406 { 7407 return emulator_read_write(ctxt, addr, (void *)val, bytes, 7408 exception, &write_emultor); 7409 } 7410 7411 #define emulator_try_cmpxchg_user(t, ptr, old, new) \ 7412 (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t)) 7413 7414 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, 7415 unsigned long addr, 7416 const void *old, 7417 const void *new, 7418 unsigned int bytes, 7419 struct
x86_exception *exception) 7420 { 7421 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7422 u64 page_line_mask; 7423 unsigned long hva; 7424 gpa_t gpa; 7425 int r; 7426 7427 /* guests cmpxchg8b have to be emulated atomically */ 7428 if (bytes > 8 || (bytes & (bytes - 1))) 7429 goto emul_write; 7430 7431 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); 7432 7433 if (gpa == UNMAPPED_GVA || 7434 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7435 goto emul_write; 7436 7437 /* 7438 * Emulate the atomic as a straight write to avoid #AC if SLD is 7439 * enabled in the host and the access splits a cache line. 7440 */ 7441 if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) 7442 page_line_mask = ~(cache_line_size() - 1); 7443 else 7444 page_line_mask = PAGE_MASK; 7445 7446 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) 7447 goto emul_write; 7448 7449 hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa)); 7450 if (kvm_is_error_hva(hva)) 7451 goto emul_write; 7452 7453 hva += offset_in_page(gpa); 7454 7455 switch (bytes) { 7456 case 1: 7457 r = emulator_try_cmpxchg_user(u8, hva, old, new); 7458 break; 7459 case 2: 7460 r = emulator_try_cmpxchg_user(u16, hva, old, new); 7461 break; 7462 case 4: 7463 r = emulator_try_cmpxchg_user(u32, hva, old, new); 7464 break; 7465 case 8: 7466 r = emulator_try_cmpxchg_user(u64, hva, old, new); 7467 break; 7468 default: 7469 BUG(); 7470 } 7471 7472 if (r < 0) 7473 return X86EMUL_UNHANDLEABLE; 7474 if (r) 7475 return X86EMUL_CMPXCHG_FAILED; 7476 7477 kvm_page_track_write(vcpu, gpa, new, bytes); 7478 7479 return X86EMUL_CONTINUE; 7480 7481 emul_write: 7482 printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); 7483 7484 return emulator_write_emulated(ctxt, addr, new, bytes, exception); 7485 } 7486 7487 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) 7488 { 7489 int r = 0, i; 7490 7491 for (i = 0; i < vcpu->arch.pio.count; i++) { 7492 if (vcpu->arch.pio.in) 7493 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, 7494 vcpu->arch.pio.size, pd); 7495 else 7496 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, 7497 vcpu->arch.pio.port, vcpu->arch.pio.size, 7498 pd); 7499 if (r) 7500 break; 7501 pd += vcpu->arch.pio.size; 7502 } 7503 return r; 7504 } 7505 7506 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, 7507 unsigned short port, 7508 unsigned int count, bool in) 7509 { 7510 vcpu->arch.pio.port = port; 7511 vcpu->arch.pio.in = in; 7512 vcpu->arch.pio.count = count; 7513 vcpu->arch.pio.size = size; 7514 7515 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) 7516 return 1; 7517 7518 vcpu->run->exit_reason = KVM_EXIT_IO; 7519 vcpu->run->io.direction = in ? 
KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 7520 vcpu->run->io.size = size; 7521 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 7522 vcpu->run->io.count = count; 7523 vcpu->run->io.port = port; 7524 7525 return 0; 7526 } 7527 7528 static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size, 7529 unsigned short port, unsigned int count) 7530 { 7531 WARN_ON(vcpu->arch.pio.count); 7532 memset(vcpu->arch.pio_data, 0, size * count); 7533 return emulator_pio_in_out(vcpu, size, port, count, true); 7534 } 7535 7536 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val) 7537 { 7538 int size = vcpu->arch.pio.size; 7539 unsigned count = vcpu->arch.pio.count; 7540 memcpy(val, vcpu->arch.pio_data, size * count); 7541 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); 7542 vcpu->arch.pio.count = 0; 7543 } 7544 7545 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, 7546 unsigned short port, void *val, unsigned int count) 7547 { 7548 if (vcpu->arch.pio.count) { 7549 /* 7550 * Complete a previous iteration that required userspace I/O. 7551 * Note, @count isn't guaranteed to match pio.count as userspace 7552 * can modify ECX before rerunning the vCPU. Ignore any such 7553 * shenanigans as KVM doesn't support modifying the rep count, 7554 * and the emulator ensures @count doesn't overflow the buffer. 7555 */ 7556 } else { 7557 int r = __emulator_pio_in(vcpu, size, port, count); 7558 if (!r) 7559 return r; 7560 7561 /* Results already available, fall through. */ 7562 } 7563 7564 complete_emulator_pio_in(vcpu, val); 7565 return 1; 7566 } 7567 7568 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, 7569 int size, unsigned short port, void *val, 7570 unsigned int count) 7571 { 7572 return emulator_pio_in(emul_to_vcpu(ctxt), size, port, val, count); 7573 7574 } 7575 7576 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size, 7577 unsigned short port, const void *val, 7578 unsigned int count) 7579 { 7580 int ret; 7581 7582 memcpy(vcpu->arch.pio_data, val, size * count); 7583 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); 7584 ret = emulator_pio_in_out(vcpu, size, port, count, false); 7585 if (ret) 7586 vcpu->arch.pio.count = 0; 7587 7588 return ret; 7589 } 7590 7591 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, 7592 int size, unsigned short port, 7593 const void *val, unsigned int count) 7594 { 7595 return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count); 7596 } 7597 7598 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 7599 { 7600 return static_call(kvm_x86_get_segment_base)(vcpu, seg); 7601 } 7602 7603 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) 7604 { 7605 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); 7606 } 7607 7608 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) 7609 { 7610 if (!need_emulate_wbinvd(vcpu)) 7611 return X86EMUL_CONTINUE; 7612 7613 if (static_call(kvm_x86_has_wbinvd_exit)()) { 7614 int cpu = get_cpu(); 7615 7616 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 7617 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, 7618 wbinvd_ipi, NULL, 1); 7619 put_cpu(); 7620 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 7621 } else 7622 wbinvd(); 7623 return X86EMUL_CONTINUE; 7624 } 7625 7626 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) 7627 { 7628 kvm_emulate_wbinvd_noskip(vcpu); 7629 return kvm_skip_emulated_instruction(vcpu); 7630 } 7631 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); 7632 7633 7634 7635 static void 
emulator_wbinvd(struct x86_emulate_ctxt *ctxt) 7636 { 7637 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); 7638 } 7639 7640 static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, 7641 unsigned long *dest) 7642 { 7643 kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); 7644 } 7645 7646 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, 7647 unsigned long value) 7648 { 7649 7650 return kvm_set_dr(emul_to_vcpu(ctxt), dr, value); 7651 } 7652 7653 static u64 mk_cr_64(u64 curr_cr, u32 new_val) 7654 { 7655 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; 7656 } 7657 7658 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) 7659 { 7660 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7661 unsigned long value; 7662 7663 switch (cr) { 7664 case 0: 7665 value = kvm_read_cr0(vcpu); 7666 break; 7667 case 2: 7668 value = vcpu->arch.cr2; 7669 break; 7670 case 3: 7671 value = kvm_read_cr3(vcpu); 7672 break; 7673 case 4: 7674 value = kvm_read_cr4(vcpu); 7675 break; 7676 case 8: 7677 value = kvm_get_cr8(vcpu); 7678 break; 7679 default: 7680 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7681 return 0; 7682 } 7683 7684 return value; 7685 } 7686 7687 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) 7688 { 7689 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7690 int res = 0; 7691 7692 switch (cr) { 7693 case 0: 7694 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); 7695 break; 7696 case 2: 7697 vcpu->arch.cr2 = val; 7698 break; 7699 case 3: 7700 res = kvm_set_cr3(vcpu, val); 7701 break; 7702 case 4: 7703 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); 7704 break; 7705 case 8: 7706 res = kvm_set_cr8(vcpu, val); 7707 break; 7708 default: 7709 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7710 res = -1; 7711 } 7712 7713 return res; 7714 } 7715 7716 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) 7717 { 7718 return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt)); 7719 } 7720 7721 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7722 { 7723 static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt); 7724 } 7725 7726 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7727 { 7728 static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt); 7729 } 7730 7731 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7732 { 7733 static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt); 7734 } 7735 7736 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7737 { 7738 static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt); 7739 } 7740 7741 static unsigned long emulator_get_cached_segment_base( 7742 struct x86_emulate_ctxt *ctxt, int seg) 7743 { 7744 return get_segment_base(emul_to_vcpu(ctxt), seg); 7745 } 7746 7747 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, 7748 struct desc_struct *desc, u32 *base3, 7749 int seg) 7750 { 7751 struct kvm_segment var; 7752 7753 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); 7754 *selector = var.selector; 7755 7756 if (var.unusable) { 7757 memset(desc, 0, sizeof(*desc)); 7758 if (base3) 7759 *base3 = 0; 7760 return false; 7761 } 7762 7763 if (var.g) 7764 var.limit >>= 12; 7765 set_desc_limit(desc, var.limit); 7766 set_desc_base(desc, (unsigned long)var.base); 7767 #ifdef CONFIG_X86_64 7768 if (base3) 7769 *base3 = var.base >> 32; 7770 #endif 7771 desc->type = var.type; 7772 desc->s = var.s; 7773 desc->dpl = var.dpl; 7774 desc->p = var.present; 7775 desc->avl = 
var.avl; 7776 desc->l = var.l; 7777 desc->d = var.db; 7778 desc->g = var.g; 7779 7780 return true; 7781 } 7782 7783 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, 7784 struct desc_struct *desc, u32 base3, 7785 int seg) 7786 { 7787 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7788 struct kvm_segment var; 7789 7790 var.selector = selector; 7791 var.base = get_desc_base(desc); 7792 #ifdef CONFIG_X86_64 7793 var.base |= ((u64)base3) << 32; 7794 #endif 7795 var.limit = get_desc_limit(desc); 7796 if (desc->g) 7797 var.limit = (var.limit << 12) | 0xfff; 7798 var.type = desc->type; 7799 var.dpl = desc->dpl; 7800 var.db = desc->d; 7801 var.s = desc->s; 7802 var.l = desc->l; 7803 var.g = desc->g; 7804 var.avl = desc->avl; 7805 var.present = desc->p; 7806 var.unusable = !var.present; 7807 var.padding = 0; 7808 7809 kvm_set_segment(vcpu, &var, seg); 7810 return; 7811 } 7812 7813 static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt, 7814 u32 msr_index, u64 *pdata) 7815 { 7816 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7817 int r; 7818 7819 r = kvm_get_msr_with_filter(vcpu, msr_index, pdata); 7820 7821 if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, 7822 complete_emulated_rdmsr, r)) { 7823 /* Bounce to user space */ 7824 return X86EMUL_IO_NEEDED; 7825 } 7826 7827 return r; 7828 } 7829 7830 static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt, 7831 u32 msr_index, u64 data) 7832 { 7833 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7834 int r; 7835 7836 r = kvm_set_msr_with_filter(vcpu, msr_index, data); 7837 7838 if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data, 7839 complete_emulated_msr_access, r)) { 7840 /* Bounce to user space */ 7841 return X86EMUL_IO_NEEDED; 7842 } 7843 7844 return r; 7845 } 7846 7847 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, 7848 u32 msr_index, u64 *pdata) 7849 { 7850 return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); 7851 } 7852 7853 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, 7854 u32 msr_index, u64 data) 7855 { 7856 return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data); 7857 } 7858 7859 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt) 7860 { 7861 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7862 7863 return vcpu->arch.smbase; 7864 } 7865 7866 static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase) 7867 { 7868 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7869 7870 vcpu->arch.smbase = smbase; 7871 } 7872 7873 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, 7874 u32 pmc) 7875 { 7876 if (kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc)) 7877 return 0; 7878 return -EINVAL; 7879 } 7880 7881 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, 7882 u32 pmc, u64 *pdata) 7883 { 7884 return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata); 7885 } 7886 7887 static void emulator_halt(struct x86_emulate_ctxt *ctxt) 7888 { 7889 emul_to_vcpu(ctxt)->arch.halt_request = 1; 7890 } 7891 7892 static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 7893 struct x86_instruction_info *info, 7894 enum x86_intercept_stage stage) 7895 { 7896 return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage, 7897 &ctxt->exception); 7898 } 7899 7900 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, 7901 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, 7902 bool exact_only) 7903 { 7904 return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only); 7905 } 7906 7907 static bool 
emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt) 7908 { 7909 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM); 7910 } 7911 7912 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt) 7913 { 7914 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE); 7915 } 7916 7917 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) 7918 { 7919 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR); 7920 } 7921 7922 static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt) 7923 { 7924 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID); 7925 } 7926 7927 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) 7928 { 7929 return kvm_register_read_raw(emul_to_vcpu(ctxt), reg); 7930 } 7931 7932 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) 7933 { 7934 kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val); 7935 } 7936 7937 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) 7938 { 7939 static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked); 7940 } 7941 7942 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) 7943 { 7944 return emul_to_vcpu(ctxt)->arch.hflags; 7945 } 7946 7947 static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt) 7948 { 7949 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7950 7951 kvm_smm_changed(vcpu, false); 7952 } 7953 7954 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt, 7955 const char *smstate) 7956 { 7957 return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate); 7958 } 7959 7960 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt) 7961 { 7962 kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt)); 7963 } 7964 7965 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr) 7966 { 7967 return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr); 7968 } 7969 7970 static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt) 7971 { 7972 struct kvm *kvm = emul_to_vcpu(ctxt)->kvm; 7973 7974 if (!kvm->vm_bugged) 7975 kvm_vm_bugged(kvm); 7976 } 7977 7978 static const struct x86_emulate_ops emulate_ops = { 7979 .vm_bugged = emulator_vm_bugged, 7980 .read_gpr = emulator_read_gpr, 7981 .write_gpr = emulator_write_gpr, 7982 .read_std = emulator_read_std, 7983 .write_std = emulator_write_std, 7984 .read_phys = kvm_read_guest_phys_system, 7985 .fetch = kvm_fetch_guest_virt, 7986 .read_emulated = emulator_read_emulated, 7987 .write_emulated = emulator_write_emulated, 7988 .cmpxchg_emulated = emulator_cmpxchg_emulated, 7989 .invlpg = emulator_invlpg, 7990 .pio_in_emulated = emulator_pio_in_emulated, 7991 .pio_out_emulated = emulator_pio_out_emulated, 7992 .get_segment = emulator_get_segment, 7993 .set_segment = emulator_set_segment, 7994 .get_cached_segment_base = emulator_get_cached_segment_base, 7995 .get_gdt = emulator_get_gdt, 7996 .get_idt = emulator_get_idt, 7997 .set_gdt = emulator_set_gdt, 7998 .set_idt = emulator_set_idt, 7999 .get_cr = emulator_get_cr, 8000 .set_cr = emulator_set_cr, 8001 .cpl = emulator_get_cpl, 8002 .get_dr = emulator_get_dr, 8003 .set_dr = emulator_set_dr, 8004 .get_smbase = emulator_get_smbase, 8005 .set_smbase = emulator_set_smbase, 8006 .set_msr_with_filter = emulator_set_msr_with_filter, 8007 .get_msr_with_filter = emulator_get_msr_with_filter, 8008 .set_msr = emulator_set_msr, 8009 .get_msr = emulator_get_msr, 8010 .check_pmc = emulator_check_pmc, 8011 .read_pmc = emulator_read_pmc, 8012 .halt = emulator_halt, 8013 .wbinvd = 
emulator_wbinvd, 8014 .fix_hypercall = emulator_fix_hypercall, 8015 .intercept = emulator_intercept, 8016 .get_cpuid = emulator_get_cpuid, 8017 .guest_has_long_mode = emulator_guest_has_long_mode, 8018 .guest_has_movbe = emulator_guest_has_movbe, 8019 .guest_has_fxsr = emulator_guest_has_fxsr, 8020 .guest_has_rdpid = emulator_guest_has_rdpid, 8021 .set_nmi_mask = emulator_set_nmi_mask, 8022 .get_hflags = emulator_get_hflags, 8023 .exiting_smm = emulator_exiting_smm, 8024 .leave_smm = emulator_leave_smm, 8025 .triple_fault = emulator_triple_fault, 8026 .set_xcr = emulator_set_xcr, 8027 }; 8028 8029 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) 8030 { 8031 u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 8032 /* 8033 * An "sti; sti" sequence only disables interrupts for the first 8034 * instruction. So, if the last instruction, be it emulated or 8035 * not, left the system with the INT_STI flag enabled, it 8036 * means that the last instruction was an sti. We should not 8037 * leave the flag on in this case. The same goes for mov ss. 8038 */ 8039 if (int_shadow & mask) 8040 mask = 0; 8041 if (unlikely(int_shadow || mask)) { 8042 static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); 8043 if (!mask) 8044 kvm_make_request(KVM_REQ_EVENT, vcpu); 8045 } 8046 } 8047 8048 static bool inject_emulated_exception(struct kvm_vcpu *vcpu) 8049 { 8050 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8051 if (ctxt->exception.vector == PF_VECTOR) 8052 return kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); 8053 8054 if (ctxt->exception.error_code_valid) 8055 kvm_queue_exception_e(vcpu, ctxt->exception.vector, 8056 ctxt->exception.error_code); 8057 else 8058 kvm_queue_exception(vcpu, ctxt->exception.vector); 8059 return false; 8060 } 8061 8062 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu) 8063 { 8064 struct x86_emulate_ctxt *ctxt; 8065 8066 ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT); 8067 if (!ctxt) { 8068 pr_err("kvm: failed to allocate vcpu's emulator\n"); 8069 return NULL; 8070 } 8071 8072 ctxt->vcpu = vcpu; 8073 ctxt->ops = &emulate_ops; 8074 vcpu->arch.emulate_ctxt = ctxt; 8075 8076 return ctxt; 8077 } 8078 8079 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) 8080 { 8081 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8082 int cs_db, cs_l; 8083 8084 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); 8085 8086 ctxt->gpa_available = false; 8087 ctxt->eflags = kvm_get_rflags(vcpu); 8088 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; 8089 8090 ctxt->eip = kvm_rip_read(vcpu); 8091 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 8092 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : 8093 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : 8094 cs_db ?
X86EMUL_MODE_PROT32 : 8095 X86EMUL_MODE_PROT16; 8096 BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); 8097 BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); 8098 BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); 8099 8100 ctxt->interruptibility = 0; 8101 ctxt->have_exception = false; 8102 ctxt->exception.vector = -1; 8103 ctxt->perm_ok = false; 8104 8105 init_decode_cache(ctxt); 8106 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 8107 } 8108 8109 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) 8110 { 8111 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8112 int ret; 8113 8114 init_emulate_ctxt(vcpu); 8115 8116 ctxt->op_bytes = 2; 8117 ctxt->ad_bytes = 2; 8118 ctxt->_eip = ctxt->eip + inc_eip; 8119 ret = emulate_int_real(ctxt, irq); 8120 8121 if (ret != X86EMUL_CONTINUE) { 8122 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 8123 } else { 8124 ctxt->eip = ctxt->_eip; 8125 kvm_rip_write(vcpu, ctxt->eip); 8126 kvm_set_rflags(vcpu, ctxt->eflags); 8127 } 8128 } 8129 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); 8130 8131 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, 8132 u8 ndata, u8 *insn_bytes, u8 insn_size) 8133 { 8134 struct kvm_run *run = vcpu->run; 8135 u64 info[5]; 8136 u8 info_start; 8137 8138 /* 8139 * Zero the whole array used to retrieve the exit info, as casting to 8140 * u32 for select entries will leave some chunks uninitialized. 8141 */ 8142 memset(&info, 0, sizeof(info)); 8143 8144 static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1], 8145 &info[2], (u32 *)&info[3], 8146 (u32 *)&info[4]); 8147 8148 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 8149 run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION; 8150 8151 /* 8152 * There's currently space for 13 entries, but 5 are used for the exit 8153 * reason and info. Restrict to 4 to reduce the maintenance burden 8154 * when expanding kvm_run.emulation_failure in the future. 8155 */ 8156 if (WARN_ON_ONCE(ndata > 4)) 8157 ndata = 4; 8158 8159 /* Always include the flags as a 'data' entry. 
*/ 8160 info_start = 1; 8161 run->emulation_failure.flags = 0; 8162 8163 if (insn_size) { 8164 BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) + 8165 sizeof(run->emulation_failure.insn_bytes) != 16)); 8166 info_start += 2; 8167 run->emulation_failure.flags |= 8168 KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES; 8169 run->emulation_failure.insn_size = insn_size; 8170 memset(run->emulation_failure.insn_bytes, 0x90, 8171 sizeof(run->emulation_failure.insn_bytes)); 8172 memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size); 8173 } 8174 8175 memcpy(&run->internal.data[info_start], info, sizeof(info)); 8176 memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data, 8177 ndata * sizeof(data[0])); 8178 8179 run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata; 8180 } 8181 8182 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu) 8183 { 8184 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8185 8186 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, 8187 ctxt->fetch.end - ctxt->fetch.data); 8188 } 8189 8190 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, 8191 u8 ndata) 8192 { 8193 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0); 8194 } 8195 EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit); 8196 8197 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) 8198 { 8199 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0); 8200 } 8201 EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit); 8202 8203 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) 8204 { 8205 struct kvm *kvm = vcpu->kvm; 8206 8207 ++vcpu->stat.insn_emulation_fail; 8208 trace_kvm_emulate_insn_failed(vcpu); 8209 8210 if (emulation_type & EMULTYPE_VMWARE_GP) { 8211 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 8212 return 1; 8213 } 8214 8215 if (kvm->arch.exit_on_emulation_error || 8216 (emulation_type & EMULTYPE_SKIP)) { 8217 prepare_emulation_ctxt_failure_exit(vcpu); 8218 return 0; 8219 } 8220 8221 kvm_queue_exception(vcpu, UD_VECTOR); 8222 8223 if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { 8224 prepare_emulation_ctxt_failure_exit(vcpu); 8225 return 0; 8226 } 8227 8228 return 1; 8229 } 8230 8231 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 8232 bool write_fault_to_shadow_pgtable, 8233 int emulation_type) 8234 { 8235 gpa_t gpa = cr2_or_gpa; 8236 kvm_pfn_t pfn; 8237 8238 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 8239 return false; 8240 8241 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 8242 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 8243 return false; 8244 8245 if (!vcpu->arch.mmu->root_role.direct) { 8246 /* 8247 * Write permission should be allowed since only 8248 * write accesses need to be emulated. 8249 */ 8250 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 8251 8252 /* 8253 * If the mapping is invalid in the guest, let the CPU retry 8254 * it to generate a fault. 8255 */ 8256 if (gpa == UNMAPPED_GVA) 8257 return true; 8258 } 8259 8260 /* 8261 * Do not retry the unhandleable instruction if it faults on 8262 * read-only host memory, otherwise it will go into an infinite loop: 8263 * retry instruction -> write #PF -> emulation fail -> retry 8264 * instruction -> ... 8265 */ 8266 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); 8267 8268 /* 8269 * If the instruction failed on the error pfn, it cannot be fixed; 8270 * report the error to userspace.
8271 */ 8272 if (is_error_noslot_pfn(pfn)) 8273 return false; 8274 8275 kvm_release_pfn_clean(pfn); 8276 8277 /* The instructions are well-emulated on direct mmu. */ 8278 if (vcpu->arch.mmu->root_role.direct) { 8279 unsigned int indirect_shadow_pages; 8280 8281 write_lock(&vcpu->kvm->mmu_lock); 8282 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; 8283 write_unlock(&vcpu->kvm->mmu_lock); 8284 8285 if (indirect_shadow_pages) 8286 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8287 8288 return true; 8289 } 8290 8291 /* 8292 * If emulation failed due to an access to a shadowed page table, 8293 * try to unshadow the page and re-enter the guest to let the CPU 8294 * re-execute the instruction. 8295 */ 8296 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8297 8298 /* 8299 * If the access faults on its own page table, it cannot 8300 * be fixed by unprotecting the shadow page, and it should 8301 * be reported to userspace. 8302 */ 8303 return !write_fault_to_shadow_pgtable; 8304 } 8305 8306 static bool retry_instruction(struct x86_emulate_ctxt *ctxt, 8307 gpa_t cr2_or_gpa, int emulation_type) 8308 { 8309 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8310 unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa; 8311 8312 last_retry_eip = vcpu->arch.last_retry_eip; 8313 last_retry_addr = vcpu->arch.last_retry_addr; 8314 8315 /* 8316 * If the emulation is caused by #PF and the instruction is not a 8317 * page-table-writing instruction, the VM-EXIT was caused by shadow 8318 * page protection; we can zap the shadow page and retry this 8319 * instruction directly. 8320 * 8321 * Note: if the guest uses a non-page-table-modifying instruction 8322 * on the PDE that points to the instruction, then we will unmap 8323 * the instruction and go into an infinite loop. So, we cache the 8324 * last retried eip and the last fault address; if we meet the eip 8325 * and the address again, we can break out of the potential infinite 8326 * loop. 8327 */ 8328 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 8329 8330 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 8331 return false; 8332 8333 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 8334 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 8335 return false; 8336 8337 if (x86_page_table_writing_insn(ctxt)) 8338 return false; 8339 8340 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) 8341 return false; 8342 8343 vcpu->arch.last_retry_eip = ctxt->eip; 8344 vcpu->arch.last_retry_addr = cr2_or_gpa; 8345 8346 if (!vcpu->arch.mmu->root_role.direct) 8347 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 8348 8349 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8350 8351 return true; 8352 } 8353 8354 static int complete_emulated_mmio(struct kvm_vcpu *vcpu); 8355 static int complete_emulated_pio(struct kvm_vcpu *vcpu); 8356 8357 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm) 8358 { 8359 trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm); 8360 8361 if (entering_smm) { 8362 vcpu->arch.hflags |= HF_SMM_MASK; 8363 } else { 8364 vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK); 8365 8366 /* Process a latched INIT or SMI, if any.
*/ 8367 kvm_make_request(KVM_REQ_EVENT, vcpu); 8368 8369 /* 8370 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band, 8371 * on SMM exit we still need to reload them from 8372 * guest memory 8373 */ 8374 vcpu->arch.pdptrs_from_userspace = false; 8375 } 8376 8377 kvm_mmu_reset_context(vcpu); 8378 } 8379 8380 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, 8381 unsigned long *db) 8382 { 8383 u32 dr6 = 0; 8384 int i; 8385 u32 enable, rwlen; 8386 8387 enable = dr7; 8388 rwlen = dr7 >> 16; 8389 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) 8390 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) 8391 dr6 |= (1 << i); 8392 return dr6; 8393 } 8394 8395 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) 8396 { 8397 struct kvm_run *kvm_run = vcpu->run; 8398 8399 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 8400 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; 8401 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); 8402 kvm_run->debug.arch.exception = DB_VECTOR; 8403 kvm_run->exit_reason = KVM_EXIT_DEBUG; 8404 return 0; 8405 } 8406 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); 8407 return 1; 8408 } 8409 8410 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) 8411 { 8412 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 8413 int r; 8414 8415 r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); 8416 if (unlikely(!r)) 8417 return 0; 8418 8419 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 8420 8421 /* 8422 * rflags is the old, "raw" value of the flags. The new value has 8423 * not been saved yet. 8424 * 8425 * This is correct even for TF set by the guest, because "the 8426 * processor will not generate this exception after the instruction 8427 * that sets the TF flag". 8428 */ 8429 if (unlikely(rflags & X86_EFLAGS_TF)) 8430 r = kvm_vcpu_do_singlestep(vcpu); 8431 return r; 8432 } 8433 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); 8434 8435 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r) 8436 { 8437 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && 8438 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { 8439 struct kvm_run *kvm_run = vcpu->run; 8440 unsigned long eip = kvm_get_linear_rip(vcpu); 8441 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 8442 vcpu->arch.guest_debug_dr7, 8443 vcpu->arch.eff_db); 8444 8445 if (dr6 != 0) { 8446 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; 8447 kvm_run->debug.arch.pc = eip; 8448 kvm_run->debug.arch.exception = DB_VECTOR; 8449 kvm_run->exit_reason = KVM_EXIT_DEBUG; 8450 *r = 0; 8451 return true; 8452 } 8453 } 8454 8455 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && 8456 !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { 8457 unsigned long eip = kvm_get_linear_rip(vcpu); 8458 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 8459 vcpu->arch.dr7, 8460 vcpu->arch.db); 8461 8462 if (dr6 != 0) { 8463 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); 8464 *r = 1; 8465 return true; 8466 } 8467 } 8468 8469 return false; 8470 } 8471 8472 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) 8473 { 8474 switch (ctxt->opcode_len) { 8475 case 1: 8476 switch (ctxt->b) { 8477 case 0xe4: /* IN */ 8478 case 0xe5: 8479 case 0xec: 8480 case 0xed: 8481 case 0xe6: /* OUT */ 8482 case 0xe7: 8483 case 0xee: 8484 case 0xef: 8485 case 0x6c: /* INS */ 8486 case 0x6d: 8487 case 0x6e: /* OUTS */ 8488 case 0x6f: 8489 return true; 8490 } 8491 break; 8492 case 2: 8493 switch (ctxt->b) { 8494 case 0x33: /* RDPMC */ 8495 return true; 8496 } 8497 break; 8498 } 8499 8500 return false; 8501 } 
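/*
 * Emulation entry points below: x86_decode_emulated_instruction() decodes the
 * current guest instruction into vcpu->arch.emulate_ctxt, and
 * x86_emulate_instruction() drives decode, execution, exception injection,
 * and any PIO/MMIO completion that must bounce through userspace.
 */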
8502 8503 /* 8504 * Decode an instruction for emulation. The caller is responsible for handling 8505 * code breakpoints. Note, manually detecting code breakpoints is unnecessary 8506 * (and wrong) when emulating on an intercepted fault-like exception[*], as 8507 * code breakpoints have higher priority and thus have already been done by 8508 * hardware. 8509 * 8510 * [*] Except #MC, which is higher priority, but KVM should never emulate in 8511 * response to a machine check. 8512 */ 8513 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, 8514 void *insn, int insn_len) 8515 { 8516 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8517 int r; 8518 8519 init_emulate_ctxt(vcpu); 8520 8521 r = x86_decode_insn(ctxt, insn, insn_len, emulation_type); 8522 8523 trace_kvm_emulate_insn_start(vcpu); 8524 ++vcpu->stat.insn_emulation; 8525 8526 return r; 8527 } 8528 EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction); 8529 8530 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 8531 int emulation_type, void *insn, int insn_len) 8532 { 8533 int r; 8534 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8535 bool writeback = true; 8536 bool write_fault_to_spt; 8537 8538 if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len))) 8539 return 1; 8540 8541 vcpu->arch.l1tf_flush_l1d = true; 8542 8543 /* 8544 * Clear write_fault_to_shadow_pgtable here to ensure it is 8545 * never reused. 8546 */ 8547 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; 8548 vcpu->arch.write_fault_to_shadow_pgtable = false; 8549 8550 if (!(emulation_type & EMULTYPE_NO_DECODE)) { 8551 kvm_clear_exception_queue(vcpu); 8552 8553 /* 8554 * Return immediately if RIP hits a code breakpoint, such #DBs 8555 * are fault-like and are higher priority than any faults on 8556 * the code fetch itself. 8557 */ 8558 if (!(emulation_type & EMULTYPE_SKIP) && 8559 kvm_vcpu_check_code_breakpoint(vcpu, &r)) 8560 return r; 8561 8562 r = x86_decode_emulated_instruction(vcpu, emulation_type, 8563 insn, insn_len); 8564 if (r != EMULATION_OK) { 8565 if ((emulation_type & EMULTYPE_TRAP_UD) || 8566 (emulation_type & EMULTYPE_TRAP_UD_FORCED)) { 8567 kvm_queue_exception(vcpu, UD_VECTOR); 8568 return 1; 8569 } 8570 if (reexecute_instruction(vcpu, cr2_or_gpa, 8571 write_fault_to_spt, 8572 emulation_type)) 8573 return 1; 8574 if (ctxt->have_exception) { 8575 /* 8576 * #UD should result in just EMULATION_FAILED, and trap-like 8577 * exception should not be encountered during decode. 8578 */ 8579 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || 8580 exception_type(ctxt->exception.vector) == EXCPT_TRAP); 8581 inject_emulated_exception(vcpu); 8582 return 1; 8583 } 8584 return handle_emulation_failure(vcpu, emulation_type); 8585 } 8586 } 8587 8588 if ((emulation_type & EMULTYPE_VMWARE_GP) && 8589 !is_vmware_backdoor_opcode(ctxt)) { 8590 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 8591 return 1; 8592 } 8593 8594 /* 8595 * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for 8596 * use *only* by vendor callbacks for kvm_skip_emulated_instruction(). 8597 * The caller is responsible for updating interruptibility state and 8598 * injecting single-step #DBs. 
8599 */ 8600 if (emulation_type & EMULTYPE_SKIP) { 8601 if (ctxt->mode != X86EMUL_MODE_PROT64) 8602 ctxt->eip = (u32)ctxt->_eip; 8603 else 8604 ctxt->eip = ctxt->_eip; 8605 8606 if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) { 8607 r = 1; 8608 goto writeback; 8609 } 8610 8611 kvm_rip_write(vcpu, ctxt->eip); 8612 if (ctxt->eflags & X86_EFLAGS_RF) 8613 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); 8614 return 1; 8615 } 8616 8617 if (retry_instruction(ctxt, cr2_or_gpa, emulation_type)) 8618 return 1; 8619 8620 /* This is needed for the vmware backdoor interface to work, since it 8621 * changes register values during the I/O operation. */ 8622 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { 8623 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 8624 emulator_invalidate_register_cache(ctxt); 8625 } 8626 8627 restart: 8628 if (emulation_type & EMULTYPE_PF) { 8629 /* Save the faulting GPA (cr2) in the address field */ 8630 ctxt->exception.address = cr2_or_gpa; 8631 8632 /* With shadow page tables, cr2 contains a GVA or nGPA. */ 8633 if (vcpu->arch.mmu->root_role.direct) { 8634 ctxt->gpa_available = true; 8635 ctxt->gpa_val = cr2_or_gpa; 8636 } 8637 } else { 8638 /* Sanitize the address out of an abundance of paranoia. */ 8639 ctxt->exception.address = 0; 8640 } 8641 8642 r = x86_emulate_insn(ctxt); 8643 8644 if (r == EMULATION_INTERCEPTED) 8645 return 1; 8646 8647 if (r == EMULATION_FAILED) { 8648 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, 8649 emulation_type)) 8650 return 1; 8651 8652 return handle_emulation_failure(vcpu, emulation_type); 8653 } 8654 8655 if (ctxt->have_exception) { 8656 r = 1; 8657 if (inject_emulated_exception(vcpu)) 8658 return r; 8659 } else if (vcpu->arch.pio.count) { 8660 if (!vcpu->arch.pio.in) { 8661 /* FIXME: return into emulator if single-stepping. */ 8662 vcpu->arch.pio.count = 0; 8663 } else { 8664 writeback = false; 8665 vcpu->arch.complete_userspace_io = complete_emulated_pio; 8666 } 8667 r = 0; 8668 } else if (vcpu->mmio_needed) { 8669 ++vcpu->stat.mmio_exits; 8670 8671 if (!vcpu->mmio_is_write) 8672 writeback = false; 8673 r = 0; 8674 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 8675 } else if (vcpu->arch.complete_userspace_io) { 8676 writeback = false; 8677 r = 0; 8678 } else if (r == EMULATION_RESTART) 8679 goto restart; 8680 else 8681 r = 1; 8682 8683 writeback: 8684 if (writeback) { 8685 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 8686 toggle_interruptibility(vcpu, ctxt->interruptibility); 8687 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 8688 if (!ctxt->have_exception || 8689 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { 8690 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 8691 if (ctxt->is_branch) 8692 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 8693 kvm_rip_write(vcpu, ctxt->eip); 8694 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) 8695 r = kvm_vcpu_do_singlestep(vcpu); 8696 static_call_cond(kvm_x86_update_emulated_instruction)(vcpu); 8697 __kvm_set_rflags(vcpu, ctxt->eflags); 8698 } 8699 8700 /* 8701 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will 8702 * do nothing, and it will be requested again as soon as 8703 * the shadow expires. But we still need to check here, 8704 * because POPF has no interrupt shadow.
8705 */ 8706 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) 8707 kvm_make_request(KVM_REQ_EVENT, vcpu); 8708 } else 8709 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; 8710 8711 return r; 8712 } 8713 8714 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) 8715 { 8716 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); 8717 } 8718 EXPORT_SYMBOL_GPL(kvm_emulate_instruction); 8719 8720 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, 8721 void *insn, int insn_len) 8722 { 8723 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); 8724 } 8725 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); 8726 8727 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) 8728 { 8729 vcpu->arch.pio.count = 0; 8730 return 1; 8731 } 8732 8733 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) 8734 { 8735 vcpu->arch.pio.count = 0; 8736 8737 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) 8738 return 1; 8739 8740 return kvm_skip_emulated_instruction(vcpu); 8741 } 8742 8743 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, 8744 unsigned short port) 8745 { 8746 unsigned long val = kvm_rax_read(vcpu); 8747 int ret = emulator_pio_out(vcpu, size, port, &val, 1); 8748 8749 if (ret) 8750 return ret; 8751 8752 /* 8753 * Workaround userspace that relies on old KVM behavior of %rip being 8754 * incremented prior to exiting to userspace to handle "OUT 0x7e". 8755 */ 8756 if (port == 0x7e && 8757 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { 8758 vcpu->arch.complete_userspace_io = 8759 complete_fast_pio_out_port_0x7e; 8760 kvm_skip_emulated_instruction(vcpu); 8761 } else { 8762 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 8763 vcpu->arch.complete_userspace_io = complete_fast_pio_out; 8764 } 8765 return 0; 8766 } 8767 8768 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) 8769 { 8770 unsigned long val; 8771 8772 /* We should only ever be called with arch.pio.count equal to 1 */ 8773 BUG_ON(vcpu->arch.pio.count != 1); 8774 8775 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { 8776 vcpu->arch.pio.count = 0; 8777 return 1; 8778 } 8779 8780 /* For size less than 4 we merge, else we zero extend */ 8781 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; 8782 8783 /* 8784 * Since vcpu->arch.pio.count == 1 let emulator_pio_in perform 8785 * the copy and tracing 8786 */ 8787 emulator_pio_in(vcpu, vcpu->arch.pio.size, vcpu->arch.pio.port, &val, 1); 8788 kvm_rax_write(vcpu, val); 8789 8790 return kvm_skip_emulated_instruction(vcpu); 8791 } 8792 8793 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, 8794 unsigned short port) 8795 { 8796 unsigned long val; 8797 int ret; 8798 8799 /* For size less than 4 we merge, else we zero extend */ 8800 val = (size < 4) ? 
kvm_rax_read(vcpu) : 0; 8801 8802 ret = emulator_pio_in(vcpu, size, port, &val, 1); 8803 if (ret) { 8804 kvm_rax_write(vcpu, val); 8805 return ret; 8806 } 8807 8808 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 8809 vcpu->arch.complete_userspace_io = complete_fast_pio_in; 8810 8811 return 0; 8812 } 8813 8814 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) 8815 { 8816 int ret; 8817 8818 if (in) 8819 ret = kvm_fast_pio_in(vcpu, size, port); 8820 else 8821 ret = kvm_fast_pio_out(vcpu, size, port); 8822 return ret && kvm_skip_emulated_instruction(vcpu); 8823 } 8824 EXPORT_SYMBOL_GPL(kvm_fast_pio); 8825 8826 static int kvmclock_cpu_down_prep(unsigned int cpu) 8827 { 8828 __this_cpu_write(cpu_tsc_khz, 0); 8829 return 0; 8830 } 8831 8832 static void tsc_khz_changed(void *data) 8833 { 8834 struct cpufreq_freqs *freq = data; 8835 unsigned long khz = 0; 8836 8837 if (data) 8838 khz = freq->new; 8839 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 8840 khz = cpufreq_quick_get(raw_smp_processor_id()); 8841 if (!khz) 8842 khz = tsc_khz; 8843 __this_cpu_write(cpu_tsc_khz, khz); 8844 } 8845 8846 #ifdef CONFIG_X86_64 8847 static void kvm_hyperv_tsc_notifier(void) 8848 { 8849 struct kvm *kvm; 8850 int cpu; 8851 8852 mutex_lock(&kvm_lock); 8853 list_for_each_entry(kvm, &vm_list, vm_list) 8854 kvm_make_mclock_inprogress_request(kvm); 8855 8856 /* no guest entries from this point */ 8857 hyperv_stop_tsc_emulation(); 8858 8859 /* TSC frequency always matches when on Hyper-V */ 8860 for_each_present_cpu(cpu) 8861 per_cpu(cpu_tsc_khz, cpu) = tsc_khz; 8862 kvm_caps.max_guest_tsc_khz = tsc_khz; 8863 8864 list_for_each_entry(kvm, &vm_list, vm_list) { 8865 __kvm_start_pvclock_update(kvm); 8866 pvclock_update_vm_gtod_copy(kvm); 8867 kvm_end_pvclock_update(kvm); 8868 } 8869 8870 mutex_unlock(&kvm_lock); 8871 } 8872 #endif 8873 8874 static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu) 8875 { 8876 struct kvm *kvm; 8877 struct kvm_vcpu *vcpu; 8878 int send_ipi = 0; 8879 unsigned long i; 8880 8881 /* 8882 * We allow guests to temporarily run on slowing clocks, 8883 * provided we notify them after, or to run on accelerating 8884 * clocks, provided we notify them before. Thus time never 8885 * goes backwards. 8886 * 8887 * However, we have a problem. We can't atomically update 8888 * the frequency of a given CPU from this function; it is 8889 * merely a notifier, which can be called from any CPU. 8890 * Changing the TSC frequency at arbitrary points in time 8891 * requires a recomputation of local variables related to 8892 * the TSC for each VCPU. We must flag these local variables 8893 * to be updated and be sure the update takes place with the 8894 * new frequency before any guests proceed. 8895 * 8896 * Unfortunately, the combination of hotplug CPU and frequency 8897 * change creates an intractable locking scenario; the order 8898 * of when these callouts happen is undefined with respect to 8899 * CPU hotplug, and they can race with each other. As such, 8900 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is 8901 * undefined; you can actually have a CPU frequency change take 8902 * place in between the computation of X and the setting of the 8903 * variable. To protect against this problem, all updates of 8904 * the per_cpu tsc_khz variable are done in an interrupt 8905 * protected IPI, and all callers wishing to update the value 8906 * must wait for a synchronous IPI to complete (which is trivial 8907 * if the caller is on the CPU already). 
This establishes the 8908 * necessary total order on variable updates. 8909 * 8910 * Note that because a guest time update may take place 8911 * anytime after the setting of the VCPU's request bit, the 8912 * correct TSC value must be set before the request. However, 8913 * to ensure the update actually makes it to any guest which 8914 * starts running in hardware virtualization between the set 8915 * and the acquisition of the spinlock, we must also ping the 8916 * CPU after setting the request bit. 8917 * 8918 */ 8919 8920 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 8921 8922 mutex_lock(&kvm_lock); 8923 list_for_each_entry(kvm, &vm_list, vm_list) { 8924 kvm_for_each_vcpu(i, vcpu, kvm) { 8925 if (vcpu->cpu != cpu) 8926 continue; 8927 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 8928 if (vcpu->cpu != raw_smp_processor_id()) 8929 send_ipi = 1; 8930 } 8931 } 8932 mutex_unlock(&kvm_lock); 8933 8934 if (freq->old < freq->new && send_ipi) { 8935 /* 8936 * We upscale the frequency. Must make the guest 8937 * doesn't see old kvmclock values while running with 8938 * the new frequency, otherwise we risk the guest sees 8939 * time go backwards. 8940 * 8941 * In case we update the frequency for another cpu 8942 * (which might be in guest context) send an interrupt 8943 * to kick the cpu out of guest context. Next time 8944 * guest context is entered kvmclock will be updated, 8945 * so the guest will not see stale values. 8946 */ 8947 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 8948 } 8949 } 8950 8951 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 8952 void *data) 8953 { 8954 struct cpufreq_freqs *freq = data; 8955 int cpu; 8956 8957 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) 8958 return 0; 8959 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) 8960 return 0; 8961 8962 for_each_cpu(cpu, freq->policy->cpus) 8963 __kvmclock_cpufreq_notifier(freq, cpu); 8964 8965 return 0; 8966 } 8967 8968 static struct notifier_block kvmclock_cpufreq_notifier_block = { 8969 .notifier_call = kvmclock_cpufreq_notifier 8970 }; 8971 8972 static int kvmclock_cpu_online(unsigned int cpu) 8973 { 8974 tsc_khz_changed(NULL); 8975 return 0; 8976 } 8977 8978 static void kvm_timer_init(void) 8979 { 8980 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 8981 max_tsc_khz = tsc_khz; 8982 8983 if (IS_ENABLED(CONFIG_CPU_FREQ)) { 8984 struct cpufreq_policy *policy; 8985 int cpu; 8986 8987 cpu = get_cpu(); 8988 policy = cpufreq_cpu_get(cpu); 8989 if (policy) { 8990 if (policy->cpuinfo.max_freq) 8991 max_tsc_khz = policy->cpuinfo.max_freq; 8992 cpufreq_cpu_put(policy); 8993 } 8994 put_cpu(); 8995 } 8996 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 8997 CPUFREQ_TRANSITION_NOTIFIER); 8998 } 8999 9000 cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online", 9001 kvmclock_cpu_online, kvmclock_cpu_down_prep); 9002 } 9003 9004 #ifdef CONFIG_X86_64 9005 static void pvclock_gtod_update_fn(struct work_struct *work) 9006 { 9007 struct kvm *kvm; 9008 struct kvm_vcpu *vcpu; 9009 unsigned long i; 9010 9011 mutex_lock(&kvm_lock); 9012 list_for_each_entry(kvm, &vm_list, vm_list) 9013 kvm_for_each_vcpu(i, vcpu, kvm) 9014 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 9015 atomic_set(&kvm_guest_has_master_clock, 0); 9016 mutex_unlock(&kvm_lock); 9017 } 9018 9019 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); 9020 9021 /* 9022 * Indirection to move queue_work() out of the tk_core.seq write held 9023 * region to prevent possible 
deadlocks against time accessors which 9024 * are invoked with work related locks held. 9025 */ 9026 static void pvclock_irq_work_fn(struct irq_work *w) 9027 { 9028 queue_work(system_long_wq, &pvclock_gtod_work); 9029 } 9030 9031 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn); 9032 9033 /* 9034 * Notification about pvclock gtod data update. 9035 */ 9036 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, 9037 void *priv) 9038 { 9039 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 9040 struct timekeeper *tk = priv; 9041 9042 update_pvclock_gtod(tk); 9043 9044 /* 9045 * Disable master clock if host does not trust, or does not use, 9046 * TSC based clocksource. Delegate queue_work() to irq_work as 9047 * this is invoked with tk_core.seq write held. 9048 */ 9049 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && 9050 atomic_read(&kvm_guest_has_master_clock) != 0) 9051 irq_work_queue(&pvclock_irq_work); 9052 return 0; 9053 } 9054 9055 static struct notifier_block pvclock_gtod_notifier = { 9056 .notifier_call = pvclock_gtod_notify, 9057 }; 9058 #endif 9059 9060 int kvm_arch_init(void *opaque) 9061 { 9062 struct kvm_x86_init_ops *ops = opaque; 9063 int r; 9064 9065 if (kvm_x86_ops.hardware_enable) { 9066 pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name); 9067 r = -EEXIST; 9068 goto out; 9069 } 9070 9071 if (!ops->cpu_has_kvm_support()) { 9072 pr_err_ratelimited("kvm: no hardware support for '%s'\n", 9073 ops->runtime_ops->name); 9074 r = -EOPNOTSUPP; 9075 goto out; 9076 } 9077 if (ops->disabled_by_bios()) { 9078 pr_err_ratelimited("kvm: support for '%s' disabled by bios\n", 9079 ops->runtime_ops->name); 9080 r = -EOPNOTSUPP; 9081 goto out; 9082 } 9083 9084 /* 9085 * KVM explicitly assumes that the guest has an FPU and 9086 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the 9087 * vCPU's FPU state as a fxregs_state struct. 
9088 */ 9089 if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) { 9090 printk(KERN_ERR "kvm: inadequate fpu\n"); 9091 r = -EOPNOTSUPP; 9092 goto out; 9093 } 9094 9095 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9096 pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n"); 9097 r = -EOPNOTSUPP; 9098 goto out; 9099 } 9100 9101 r = -ENOMEM; 9102 9103 x86_emulator_cache = kvm_alloc_emulator_cache(); 9104 if (!x86_emulator_cache) { 9105 pr_err("kvm: failed to allocate cache for x86 emulator\n"); 9106 goto out; 9107 } 9108 9109 user_return_msrs = alloc_percpu(struct kvm_user_return_msrs); 9110 if (!user_return_msrs) { 9111 printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n"); 9112 goto out_free_x86_emulator_cache; 9113 } 9114 kvm_nr_uret_msrs = 0; 9115 9116 r = kvm_mmu_vendor_module_init(); 9117 if (r) 9118 goto out_free_percpu; 9119 9120 kvm_timer_init(); 9121 9122 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 9123 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 9124 kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0; 9125 } 9126 9127 if (pi_inject_timer == -1) 9128 pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER); 9129 #ifdef CONFIG_X86_64 9130 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); 9131 9132 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9133 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier); 9134 #endif 9135 9136 return 0; 9137 9138 out_free_percpu: 9139 free_percpu(user_return_msrs); 9140 out_free_x86_emulator_cache: 9141 kmem_cache_destroy(x86_emulator_cache); 9142 out: 9143 return r; 9144 } 9145 9146 void kvm_arch_exit(void) 9147 { 9148 #ifdef CONFIG_X86_64 9149 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9150 clear_hv_tscchange_cb(); 9151 #endif 9152 kvm_lapic_exit(); 9153 9154 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 9155 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, 9156 CPUFREQ_TRANSITION_NOTIFIER); 9157 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); 9158 #ifdef CONFIG_X86_64 9159 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); 9160 irq_work_sync(&pvclock_irq_work); 9161 cancel_work_sync(&pvclock_gtod_work); 9162 #endif 9163 kvm_x86_ops.hardware_enable = NULL; 9164 kvm_mmu_vendor_module_exit(); 9165 free_percpu(user_return_msrs); 9166 kmem_cache_destroy(x86_emulator_cache); 9167 #ifdef CONFIG_KVM_XEN 9168 static_key_deferred_flush(&kvm_xen_enabled); 9169 WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); 9170 #endif 9171 } 9172 9173 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) 9174 { 9175 /* 9176 * The vCPU has halted, e.g. executed HLT. Update the run state if the 9177 * local APIC is in-kernel, the run loop will detect the non-runnable 9178 * state and halt the vCPU. Exit to userspace if the local APIC is 9179 * managed by userspace, in which case userspace is responsible for 9180 * handling wake events. 9181 */ 9182 ++vcpu->stat.halt_exits; 9183 if (lapic_in_kernel(vcpu)) { 9184 vcpu->arch.mp_state = state; 9185 return 1; 9186 } else { 9187 vcpu->run->exit_reason = reason; 9188 return 0; 9189 } 9190 } 9191 9192 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu) 9193 { 9194 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); 9195 } 9196 EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip); 9197 9198 int kvm_emulate_halt(struct kvm_vcpu *vcpu) 9199 { 9200 int ret = kvm_skip_emulated_instruction(vcpu); 9201 /* 9202 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered 9203 * KVM_EXIT_DEBUG here. 
9204 */ 9205 return kvm_emulate_halt_noskip(vcpu) && ret; 9206 } 9207 EXPORT_SYMBOL_GPL(kvm_emulate_halt); 9208 9209 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) 9210 { 9211 int ret = kvm_skip_emulated_instruction(vcpu); 9212 9213 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, 9214 KVM_EXIT_AP_RESET_HOLD) && ret; 9215 } 9216 EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold); 9217 9218 #ifdef CONFIG_X86_64 9219 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, 9220 unsigned long clock_type) 9221 { 9222 struct kvm_clock_pairing clock_pairing; 9223 struct timespec64 ts; 9224 u64 cycle; 9225 int ret; 9226 9227 if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK) 9228 return -KVM_EOPNOTSUPP; 9229 9230 /* 9231 * When tsc is in permanent catchup mode guests won't be able to use 9232 * pvclock_read_retry loop to get consistent view of pvclock 9233 */ 9234 if (vcpu->arch.tsc_always_catchup) 9235 return -KVM_EOPNOTSUPP; 9236 9237 if (!kvm_get_walltime_and_clockread(&ts, &cycle)) 9238 return -KVM_EOPNOTSUPP; 9239 9240 clock_pairing.sec = ts.tv_sec; 9241 clock_pairing.nsec = ts.tv_nsec; 9242 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); 9243 clock_pairing.flags = 0; 9244 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad)); 9245 9246 ret = 0; 9247 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, 9248 sizeof(struct kvm_clock_pairing))) 9249 ret = -KVM_EFAULT; 9250 9251 return ret; 9252 } 9253 #endif 9254 9255 /* 9256 * kvm_pv_kick_cpu_op: Kick a vcpu. 9257 * 9258 * @apicid - apicid of vcpu to be kicked. 9259 */ 9260 static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid) 9261 { 9262 struct kvm_lapic_irq lapic_irq; 9263 9264 lapic_irq.shorthand = APIC_DEST_NOSHORT; 9265 lapic_irq.dest_mode = APIC_DEST_PHYSICAL; 9266 lapic_irq.level = 0; 9267 lapic_irq.dest_id = apicid; 9268 lapic_irq.msi_redir_hint = false; 9269 9270 lapic_irq.delivery_mode = APIC_DM_REMRD; 9271 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); 9272 } 9273 9274 bool kvm_apicv_activated(struct kvm *kvm) 9275 { 9276 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); 9277 } 9278 EXPORT_SYMBOL_GPL(kvm_apicv_activated); 9279 9280 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu) 9281 { 9282 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons); 9283 ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu); 9284 9285 return (vm_reasons | vcpu_reasons) == 0; 9286 } 9287 EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated); 9288 9289 static void set_or_clear_apicv_inhibit(unsigned long *inhibits, 9290 enum kvm_apicv_inhibit reason, bool set) 9291 { 9292 if (set) 9293 __set_bit(reason, inhibits); 9294 else 9295 __clear_bit(reason, inhibits); 9296 9297 trace_kvm_apicv_inhibit_changed(reason, set, *inhibits); 9298 } 9299 9300 static void kvm_apicv_init(struct kvm *kvm) 9301 { 9302 unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons; 9303 9304 init_rwsem(&kvm->arch.apicv_update_lock); 9305 9306 set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true); 9307 9308 if (!enable_apicv) 9309 set_or_clear_apicv_inhibit(inhibits, 9310 APICV_INHIBIT_REASON_DISABLE, true); 9311 } 9312 9313 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) 9314 { 9315 struct kvm_vcpu *target = NULL; 9316 struct kvm_apic_map *map; 9317 9318 vcpu->stat.directed_yield_attempted++; 9319 9320 if (single_task_running()) 9321 goto no_yield; 9322 9323 rcu_read_lock(); 9324 map = rcu_dereference(vcpu->kvm->arch.apic_map); 9325 9326 if (likely(map) && 
dest_id <= map->max_apic_id && map->phys_map[dest_id]) 9327 target = map->phys_map[dest_id]->vcpu; 9328 9329 rcu_read_unlock(); 9330 9331 if (!target || !READ_ONCE(target->ready)) 9332 goto no_yield; 9333 9334 /* Ignore requests to yield to self */ 9335 if (vcpu == target) 9336 goto no_yield; 9337 9338 if (kvm_vcpu_yield_to(target) <= 0) 9339 goto no_yield; 9340 9341 vcpu->stat.directed_yield_successful++; 9342 9343 no_yield: 9344 return; 9345 } 9346 9347 static int complete_hypercall_exit(struct kvm_vcpu *vcpu) 9348 { 9349 u64 ret = vcpu->run->hypercall.ret; 9350 9351 if (!is_64_bit_mode(vcpu)) 9352 ret = (u32)ret; 9353 kvm_rax_write(vcpu, ret); 9354 ++vcpu->stat.hypercalls; 9355 return kvm_skip_emulated_instruction(vcpu); 9356 } 9357 9358 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 9359 { 9360 unsigned long nr, a0, a1, a2, a3, ret; 9361 int op_64_bit; 9362 9363 if (kvm_xen_hypercall_enabled(vcpu->kvm)) 9364 return kvm_xen_hypercall(vcpu); 9365 9366 if (kvm_hv_hypercall_enabled(vcpu)) 9367 return kvm_hv_hypercall(vcpu); 9368 9369 nr = kvm_rax_read(vcpu); 9370 a0 = kvm_rbx_read(vcpu); 9371 a1 = kvm_rcx_read(vcpu); 9372 a2 = kvm_rdx_read(vcpu); 9373 a3 = kvm_rsi_read(vcpu); 9374 9375 trace_kvm_hypercall(nr, a0, a1, a2, a3); 9376 9377 op_64_bit = is_64_bit_hypercall(vcpu); 9378 if (!op_64_bit) { 9379 nr &= 0xFFFFFFFF; 9380 a0 &= 0xFFFFFFFF; 9381 a1 &= 0xFFFFFFFF; 9382 a2 &= 0xFFFFFFFF; 9383 a3 &= 0xFFFFFFFF; 9384 } 9385 9386 if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { 9387 ret = -KVM_EPERM; 9388 goto out; 9389 } 9390 9391 ret = -KVM_ENOSYS; 9392 9393 switch (nr) { 9394 case KVM_HC_VAPIC_POLL_IRQ: 9395 ret = 0; 9396 break; 9397 case KVM_HC_KICK_CPU: 9398 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT)) 9399 break; 9400 9401 kvm_pv_kick_cpu_op(vcpu->kvm, a1); 9402 kvm_sched_yield(vcpu, a1); 9403 ret = 0; 9404 break; 9405 #ifdef CONFIG_X86_64 9406 case KVM_HC_CLOCK_PAIRING: 9407 ret = kvm_pv_clock_pairing(vcpu, a0, a1); 9408 break; 9409 #endif 9410 case KVM_HC_SEND_IPI: 9411 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI)) 9412 break; 9413 9414 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); 9415 break; 9416 case KVM_HC_SCHED_YIELD: 9417 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) 9418 break; 9419 9420 kvm_sched_yield(vcpu, a0); 9421 ret = 0; 9422 break; 9423 case KVM_HC_MAP_GPA_RANGE: { 9424 u64 gpa = a0, npages = a1, attrs = a2; 9425 9426 ret = -KVM_ENOSYS; 9427 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) 9428 break; 9429 9430 if (!PAGE_ALIGNED(gpa) || !npages || 9431 gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) { 9432 ret = -KVM_EINVAL; 9433 break; 9434 } 9435 9436 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; 9437 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; 9438 vcpu->run->hypercall.args[0] = gpa; 9439 vcpu->run->hypercall.args[1] = npages; 9440 vcpu->run->hypercall.args[2] = attrs; 9441 vcpu->run->hypercall.longmode = op_64_bit; 9442 vcpu->arch.complete_userspace_io = complete_hypercall_exit; 9443 return 0; 9444 } 9445 default: 9446 ret = -KVM_ENOSYS; 9447 break; 9448 } 9449 out: 9450 if (!op_64_bit) 9451 ret = (u32)ret; 9452 kvm_rax_write(vcpu, ret); 9453 9454 ++vcpu->stat.hypercalls; 9455 return kvm_skip_emulated_instruction(vcpu); 9456 } 9457 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 9458 9459 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) 9460 { 9461 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 9462 char instruction[3]; 9463 unsigned long rip = kvm_rip_read(vcpu); 9464 9465 /* 9466 * If the quirk is 
disabled, synthesize a #UD and let the guest pick up 9467 * the pieces. 9468 */ 9469 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { 9470 ctxt->exception.error_code_valid = false; 9471 ctxt->exception.vector = UD_VECTOR; 9472 ctxt->have_exception = true; 9473 return X86EMUL_PROPAGATE_FAULT; 9474 } 9475 9476 static_call(kvm_x86_patch_hypercall)(vcpu, instruction); 9477 9478 return emulator_write_emulated(ctxt, rip, instruction, 3, 9479 &ctxt->exception); 9480 } 9481 9482 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 9483 { 9484 return vcpu->run->request_interrupt_window && 9485 likely(!pic_in_kernel(vcpu->kvm)); 9486 } 9487 9488 /* Called within kvm->srcu read side. */ 9489 static void post_kvm_run_save(struct kvm_vcpu *vcpu) 9490 { 9491 struct kvm_run *kvm_run = vcpu->run; 9492 9493 kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu); 9494 kvm_run->cr8 = kvm_get_cr8(vcpu); 9495 kvm_run->apic_base = kvm_get_apic_base(vcpu); 9496 9497 kvm_run->ready_for_interrupt_injection = 9498 pic_in_kernel(vcpu->kvm) || 9499 kvm_vcpu_ready_for_interrupt_injection(vcpu); 9500 9501 if (is_smm(vcpu)) 9502 kvm_run->flags |= KVM_RUN_X86_SMM; 9503 } 9504 9505 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 9506 { 9507 int max_irr, tpr; 9508 9509 if (!kvm_x86_ops.update_cr8_intercept) 9510 return; 9511 9512 if (!lapic_in_kernel(vcpu)) 9513 return; 9514 9515 if (vcpu->arch.apic->apicv_active) 9516 return; 9517 9518 if (!vcpu->arch.apic->vapic_addr) 9519 max_irr = kvm_lapic_find_highest_irr(vcpu); 9520 else 9521 max_irr = -1; 9522 9523 if (max_irr != -1) 9524 max_irr >>= 4; 9525 9526 tpr = kvm_lapic_get_cr8(vcpu); 9527 9528 static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); 9529 } 9530 9531 9532 int kvm_check_nested_events(struct kvm_vcpu *vcpu) 9533 { 9534 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 9535 kvm_x86_ops.nested_ops->triple_fault(vcpu); 9536 return 1; 9537 } 9538 9539 return kvm_x86_ops.nested_ops->check_events(vcpu); 9540 } 9541 9542 static void kvm_inject_exception(struct kvm_vcpu *vcpu) 9543 { 9544 trace_kvm_inj_exception(vcpu->arch.exception.nr, 9545 vcpu->arch.exception.has_error_code, 9546 vcpu->arch.exception.error_code, 9547 vcpu->arch.exception.injected); 9548 9549 if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) 9550 vcpu->arch.exception.error_code = false; 9551 static_call(kvm_x86_queue_exception)(vcpu); 9552 } 9553 9554 static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) 9555 { 9556 int r; 9557 bool can_inject = true; 9558 9559 /* try to reinject previous events if any */ 9560 9561 if (vcpu->arch.exception.injected) { 9562 kvm_inject_exception(vcpu); 9563 can_inject = false; 9564 } 9565 /* 9566 * Do not inject an NMI or interrupt if there is a pending 9567 * exception. Exceptions and interrupts are recognized at 9568 * instruction boundaries, i.e. the start of an instruction. 9569 * Trap-like exceptions, e.g. #DB, have higher priority than 9570 * NMIs and interrupts, i.e. traps are recognized before an 9571 * NMI/interrupt that's pending on the same instruction. 9572 * Fault-like exceptions, e.g. #GP and #PF, are the lowest 9573 * priority, but are only generated (pended) during instruction 9574 * execution, i.e. a pending fault-like exception means the 9575 * fault occurred on the *previous* instruction and must be 9576 * serviced prior to recognizing any new events in order to 9577 * fully complete the previous instruction. 
9578 */ 9579 else if (!vcpu->arch.exception.pending) { 9580 if (vcpu->arch.nmi_injected) { 9581 static_call(kvm_x86_inject_nmi)(vcpu); 9582 can_inject = false; 9583 } else if (vcpu->arch.interrupt.injected) { 9584 static_call(kvm_x86_inject_irq)(vcpu, true); 9585 can_inject = false; 9586 } 9587 } 9588 9589 WARN_ON_ONCE(vcpu->arch.exception.injected && 9590 vcpu->arch.exception.pending); 9591 9592 /* 9593 * Call check_nested_events() even if we reinjected a previous event 9594 * in order for caller to determine if it should require immediate-exit 9595 * from L2 to L1 due to pending L1 events which require exit 9596 * from L2 to L1. 9597 */ 9598 if (is_guest_mode(vcpu)) { 9599 r = kvm_check_nested_events(vcpu); 9600 if (r < 0) 9601 goto out; 9602 } 9603 9604 /* try to inject new event if pending */ 9605 if (vcpu->arch.exception.pending) { 9606 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) 9607 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | 9608 X86_EFLAGS_RF); 9609 9610 if (vcpu->arch.exception.nr == DB_VECTOR) { 9611 kvm_deliver_exception_payload(vcpu); 9612 if (vcpu->arch.dr7 & DR7_GD) { 9613 vcpu->arch.dr7 &= ~DR7_GD; 9614 kvm_update_dr7(vcpu); 9615 } 9616 } 9617 9618 kvm_inject_exception(vcpu); 9619 9620 vcpu->arch.exception.pending = false; 9621 vcpu->arch.exception.injected = true; 9622 9623 can_inject = false; 9624 } 9625 9626 /* Don't inject interrupts if the user asked to avoid doing so */ 9627 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) 9628 return 0; 9629 9630 /* 9631 * Finally, inject interrupt events. If an event cannot be injected 9632 * due to architectural conditions (e.g. IF=0) a window-open exit 9633 * will re-request KVM_REQ_EVENT. Sometimes however an event is pending 9634 * and can architecturally be injected, but we cannot do it right now: 9635 * an interrupt could have arrived just now and we have to inject it 9636 * as a vmexit, or there could already an event in the queue, which is 9637 * indicated by can_inject. In that case we request an immediate exit 9638 * in order to make progress and get back here for another iteration. 9639 * The kvm_x86_ops hooks communicate this by returning -EBUSY. 9640 */ 9641 if (vcpu->arch.smi_pending) { 9642 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; 9643 if (r < 0) 9644 goto out; 9645 if (r) { 9646 vcpu->arch.smi_pending = false; 9647 ++vcpu->arch.smi_count; 9648 enter_smm(vcpu); 9649 can_inject = false; 9650 } else 9651 static_call(kvm_x86_enable_smi_window)(vcpu); 9652 } 9653 9654 if (vcpu->arch.nmi_pending) { 9655 r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; 9656 if (r < 0) 9657 goto out; 9658 if (r) { 9659 --vcpu->arch.nmi_pending; 9660 vcpu->arch.nmi_injected = true; 9661 static_call(kvm_x86_inject_nmi)(vcpu); 9662 can_inject = false; 9663 WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); 9664 } 9665 if (vcpu->arch.nmi_pending) 9666 static_call(kvm_x86_enable_nmi_window)(vcpu); 9667 } 9668 9669 if (kvm_cpu_has_injectable_intr(vcpu)) { 9670 r = can_inject ? 
static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; 9671 if (r < 0) 9672 goto out; 9673 if (r) { 9674 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); 9675 static_call(kvm_x86_inject_irq)(vcpu, false); 9676 WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); 9677 } 9678 if (kvm_cpu_has_injectable_intr(vcpu)) 9679 static_call(kvm_x86_enable_irq_window)(vcpu); 9680 } 9681 9682 if (is_guest_mode(vcpu) && 9683 kvm_x86_ops.nested_ops->hv_timer_pending && 9684 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) 9685 *req_immediate_exit = true; 9686 9687 WARN_ON(vcpu->arch.exception.pending); 9688 return 0; 9689 9690 out: 9691 if (r == -EBUSY) { 9692 *req_immediate_exit = true; 9693 r = 0; 9694 } 9695 return r; 9696 } 9697 9698 static void process_nmi(struct kvm_vcpu *vcpu) 9699 { 9700 unsigned limit = 2; 9701 9702 /* 9703 * x86 is limited to one NMI running, and one NMI pending after it. 9704 * If an NMI is already in progress, limit further NMIs to just one. 9705 * Otherwise, allow two (and we'll inject the first one immediately). 9706 */ 9707 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) 9708 limit = 1; 9709 9710 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); 9711 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); 9712 kvm_make_request(KVM_REQ_EVENT, vcpu); 9713 } 9714 9715 static u32 enter_smm_get_segment_flags(struct kvm_segment *seg) 9716 { 9717 u32 flags = 0; 9718 flags |= seg->g << 23; 9719 flags |= seg->db << 22; 9720 flags |= seg->l << 21; 9721 flags |= seg->avl << 20; 9722 flags |= seg->present << 15; 9723 flags |= seg->dpl << 13; 9724 flags |= seg->s << 12; 9725 flags |= seg->type << 8; 9726 return flags; 9727 } 9728 9729 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n) 9730 { 9731 struct kvm_segment seg; 9732 int offset; 9733 9734 kvm_get_segment(vcpu, &seg, n); 9735 put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector); 9736 9737 if (n < 3) 9738 offset = 0x7f84 + n * 12; 9739 else 9740 offset = 0x7f2c + (n - 3) * 12; 9741 9742 put_smstate(u32, buf, offset + 8, seg.base); 9743 put_smstate(u32, buf, offset + 4, seg.limit); 9744 put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg)); 9745 } 9746 9747 #ifdef CONFIG_X86_64 9748 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n) 9749 { 9750 struct kvm_segment seg; 9751 int offset; 9752 u16 flags; 9753 9754 kvm_get_segment(vcpu, &seg, n); 9755 offset = 0x7e00 + n * 16; 9756 9757 flags = enter_smm_get_segment_flags(&seg) >> 8; 9758 put_smstate(u16, buf, offset, seg.selector); 9759 put_smstate(u16, buf, offset + 2, flags); 9760 put_smstate(u32, buf, offset + 4, seg.limit); 9761 put_smstate(u64, buf, offset + 8, seg.base); 9762 } 9763 #endif 9764 9765 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) 9766 { 9767 struct desc_ptr dt; 9768 struct kvm_segment seg; 9769 unsigned long val; 9770 int i; 9771 9772 put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu)); 9773 put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu)); 9774 put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu)); 9775 put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu)); 9776 9777 for (i = 0; i < 8; i++) 9778 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i)); 9779 9780 kvm_get_dr(vcpu, 6, &val); 9781 put_smstate(u32, buf, 0x7fcc, (u32)val); 9782 kvm_get_dr(vcpu, 7, &val); 9783 put_smstate(u32, buf, 0x7fc8, (u32)val); 9784 9785 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); 9786 put_smstate(u32, buf, 0x7fc4, 
seg.selector); 9787 put_smstate(u32, buf, 0x7f64, seg.base); 9788 put_smstate(u32, buf, 0x7f60, seg.limit); 9789 put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg)); 9790 9791 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); 9792 put_smstate(u32, buf, 0x7fc0, seg.selector); 9793 put_smstate(u32, buf, 0x7f80, seg.base); 9794 put_smstate(u32, buf, 0x7f7c, seg.limit); 9795 put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg)); 9796 9797 static_call(kvm_x86_get_gdt)(vcpu, &dt); 9798 put_smstate(u32, buf, 0x7f74, dt.address); 9799 put_smstate(u32, buf, 0x7f70, dt.size); 9800 9801 static_call(kvm_x86_get_idt)(vcpu, &dt); 9802 put_smstate(u32, buf, 0x7f58, dt.address); 9803 put_smstate(u32, buf, 0x7f54, dt.size); 9804 9805 for (i = 0; i < 6; i++) 9806 enter_smm_save_seg_32(vcpu, buf, i); 9807 9808 put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu)); 9809 9810 /* revision id */ 9811 put_smstate(u32, buf, 0x7efc, 0x00020000); 9812 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); 9813 } 9814 9815 #ifdef CONFIG_X86_64 9816 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) 9817 { 9818 struct desc_ptr dt; 9819 struct kvm_segment seg; 9820 unsigned long val; 9821 int i; 9822 9823 for (i = 0; i < 16; i++) 9824 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i)); 9825 9826 put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu)); 9827 put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu)); 9828 9829 kvm_get_dr(vcpu, 6, &val); 9830 put_smstate(u64, buf, 0x7f68, val); 9831 kvm_get_dr(vcpu, 7, &val); 9832 put_smstate(u64, buf, 0x7f60, val); 9833 9834 put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu)); 9835 put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu)); 9836 put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu)); 9837 9838 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); 9839 9840 /* revision id */ 9841 put_smstate(u32, buf, 0x7efc, 0x00020064); 9842 9843 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); 9844 9845 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); 9846 put_smstate(u16, buf, 0x7e90, seg.selector); 9847 put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8); 9848 put_smstate(u32, buf, 0x7e94, seg.limit); 9849 put_smstate(u64, buf, 0x7e98, seg.base); 9850 9851 static_call(kvm_x86_get_idt)(vcpu, &dt); 9852 put_smstate(u32, buf, 0x7e84, dt.size); 9853 put_smstate(u64, buf, 0x7e88, dt.address); 9854 9855 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); 9856 put_smstate(u16, buf, 0x7e70, seg.selector); 9857 put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8); 9858 put_smstate(u32, buf, 0x7e74, seg.limit); 9859 put_smstate(u64, buf, 0x7e78, seg.base); 9860 9861 static_call(kvm_x86_get_gdt)(vcpu, &dt); 9862 put_smstate(u32, buf, 0x7e64, dt.size); 9863 put_smstate(u64, buf, 0x7e68, dt.address); 9864 9865 for (i = 0; i < 6; i++) 9866 enter_smm_save_seg_64(vcpu, buf, i); 9867 } 9868 #endif 9869 9870 static void enter_smm(struct kvm_vcpu *vcpu) 9871 { 9872 struct kvm_segment cs, ds; 9873 struct desc_ptr dt; 9874 unsigned long cr0; 9875 char buf[512]; 9876 9877 memset(buf, 0, 512); 9878 #ifdef CONFIG_X86_64 9879 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) 9880 enter_smm_save_state_64(vcpu, buf); 9881 else 9882 #endif 9883 enter_smm_save_state_32(vcpu, buf); 9884 9885 /* 9886 * Give enter_smm() a chance to make ISA-specific changes to the vCPU 9887 * state (e.g. leave guest mode) after we've saved the state into the 9888 * SMM state-save area. 
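 * Note that 'buf' mirrors the 512-byte state-save area; it is copied
 * into guest memory at smbase + 0xfe00 just below.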
9889 */ 9890 static_call(kvm_x86_enter_smm)(vcpu, buf); 9891 9892 kvm_smm_changed(vcpu, true); 9893 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); 9894 9895 if (static_call(kvm_x86_get_nmi_mask)(vcpu)) 9896 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 9897 else 9898 static_call(kvm_x86_set_nmi_mask)(vcpu, true); 9899 9900 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 9901 kvm_rip_write(vcpu, 0x8000); 9902 9903 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); 9904 static_call(kvm_x86_set_cr0)(vcpu, cr0); 9905 vcpu->arch.cr0 = cr0; 9906 9907 static_call(kvm_x86_set_cr4)(vcpu, 0); 9908 9909 /* Undocumented: IDT limit is set to zero on entry to SMM. */ 9910 dt.address = dt.size = 0; 9911 static_call(kvm_x86_set_idt)(vcpu, &dt); 9912 9913 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 9914 9915 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; 9916 cs.base = vcpu->arch.smbase; 9917 9918 ds.selector = 0; 9919 ds.base = 0; 9920 9921 cs.limit = ds.limit = 0xffffffff; 9922 cs.type = ds.type = 0x3; 9923 cs.dpl = ds.dpl = 0; 9924 cs.db = ds.db = 0; 9925 cs.s = ds.s = 1; 9926 cs.l = ds.l = 0; 9927 cs.g = ds.g = 1; 9928 cs.avl = ds.avl = 0; 9929 cs.present = ds.present = 1; 9930 cs.unusable = ds.unusable = 0; 9931 cs.padding = ds.padding = 0; 9932 9933 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 9934 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); 9935 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); 9936 kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); 9937 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); 9938 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); 9939 9940 #ifdef CONFIG_X86_64 9941 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) 9942 static_call(kvm_x86_set_efer)(vcpu, 0); 9943 #endif 9944 9945 kvm_update_cpuid_runtime(vcpu); 9946 kvm_mmu_reset_context(vcpu); 9947 } 9948 9949 static void process_smi(struct kvm_vcpu *vcpu) 9950 { 9951 vcpu->arch.smi_pending = true; 9952 kvm_make_request(KVM_REQ_EVENT, vcpu); 9953 } 9954 9955 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, 9956 unsigned long *vcpu_bitmap) 9957 { 9958 kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap); 9959 } 9960 9961 void kvm_make_scan_ioapic_request(struct kvm *kvm) 9962 { 9963 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); 9964 } 9965 9966 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) 9967 { 9968 struct kvm_lapic *apic = vcpu->arch.apic; 9969 bool activate; 9970 9971 if (!lapic_in_kernel(vcpu)) 9972 return; 9973 9974 down_read(&vcpu->kvm->arch.apicv_update_lock); 9975 preempt_disable(); 9976 9977 activate = kvm_vcpu_apicv_activated(vcpu); 9978 9979 if (apic->apicv_active == activate) 9980 goto out; 9981 9982 apic->apicv_active = activate; 9983 kvm_apic_update_apicv(vcpu); 9984 static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); 9985 9986 /* 9987 * When APICv gets disabled, we may still have injected interrupts 9988 * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was 9989 * still active when the interrupt got accepted. Make sure 9990 * inject_pending_event() is called to check for that. 
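 * Requesting KVM_REQ_EVENT below forces a trip through
 * inject_pending_event() on the next VM-Enter.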
9991 */ 9992 if (!apic->apicv_active) 9993 kvm_make_request(KVM_REQ_EVENT, vcpu); 9994 9995 out: 9996 preempt_enable(); 9997 up_read(&vcpu->kvm->arch.apicv_update_lock); 9998 } 9999 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); 10000 10001 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, 10002 enum kvm_apicv_inhibit reason, bool set) 10003 { 10004 unsigned long old, new; 10005 10006 lockdep_assert_held_write(&kvm->arch.apicv_update_lock); 10007 10008 if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(reason)) 10009 return; 10010 10011 old = new = kvm->arch.apicv_inhibit_reasons; 10012 10013 set_or_clear_apicv_inhibit(&new, reason, set); 10014 10015 if (!!old != !!new) { 10016 /* 10017 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid 10018 * false positives in the sanity check WARN in svm_vcpu_run(). 10019 * This task will wait for all vCPUs to ack the kick IRQ before 10020 * updating apicv_inhibit_reasons, and all other vCPUs will 10021 * block on acquiring apicv_update_lock so that vCPUs can't 10022 * redo svm_vcpu_run() without seeing the new inhibit state. 10023 * 10024 * Note, holding apicv_update_lock and taking it in the read 10025 * side (handling the request) also prevents other vCPUs from 10026 * servicing the request with a stale apicv_inhibit_reasons. 10027 */ 10028 kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE); 10029 kvm->arch.apicv_inhibit_reasons = new; 10030 if (new) { 10031 unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE); 10032 kvm_zap_gfn_range(kvm, gfn, gfn+1); 10033 } 10034 } else { 10035 kvm->arch.apicv_inhibit_reasons = new; 10036 } 10037 } 10038 10039 void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, 10040 enum kvm_apicv_inhibit reason, bool set) 10041 { 10042 if (!enable_apicv) 10043 return; 10044 10045 down_write(&kvm->arch.apicv_update_lock); 10046 __kvm_set_or_clear_apicv_inhibit(kvm, reason, set); 10047 up_write(&kvm->arch.apicv_update_lock); 10048 } 10049 EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit); 10050 10051 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) 10052 { 10053 if (!kvm_apic_present(vcpu)) 10054 return; 10055 10056 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); 10057 10058 if (irqchip_split(vcpu->kvm)) 10059 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); 10060 else { 10061 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10062 if (ioapic_in_kernel(vcpu->kvm)) 10063 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); 10064 } 10065 10066 if (is_guest_mode(vcpu)) 10067 vcpu->arch.load_eoi_exitmap_pending = true; 10068 else 10069 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); 10070 } 10071 10072 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) 10073 { 10074 u64 eoi_exit_bitmap[4]; 10075 10076 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 10077 return; 10078 10079 if (to_hv_vcpu(vcpu)) { 10080 bitmap_or((ulong *)eoi_exit_bitmap, 10081 vcpu->arch.ioapic_handled_vectors, 10082 to_hv_synic(vcpu)->vec_bitmap, 256); 10083 static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); 10084 return; 10085 } 10086 10087 static_call_cond(kvm_x86_load_eoi_exitmap)( 10088 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); 10089 } 10090 10091 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, 10092 unsigned long start, unsigned long end) 10093 { 10094 unsigned long apic_address; 10095 10096 /* 10097 * The physical address of apic access page is stored in the VMCS. 10098 * Update it when it becomes invalid. 
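 * The reload request is only made when the invalidated range overlaps
 * the page's host virtual address, as computed below.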
10099 */ 10100 apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 10101 if (start <= apic_address && apic_address < end) 10102 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); 10103 } 10104 10105 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) 10106 { 10107 static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm); 10108 } 10109 10110 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) 10111 { 10112 if (!lapic_in_kernel(vcpu)) 10113 return; 10114 10115 static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu); 10116 } 10117 10118 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) 10119 { 10120 smp_send_reschedule(vcpu->cpu); 10121 } 10122 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); 10123 10124 /* 10125 * Called within kvm->srcu read side. 10126 * Returns 1 to let vcpu_run() continue the guest execution loop without 10127 * exiting to the userspace. Otherwise, the value will be returned to the 10128 * userspace. 10129 */ 10130 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) 10131 { 10132 int r; 10133 bool req_int_win = 10134 dm_request_for_irq_injection(vcpu) && 10135 kvm_cpu_accept_dm_intr(vcpu); 10136 fastpath_t exit_fastpath; 10137 10138 bool req_immediate_exit = false; 10139 10140 /* Forbid vmenter if vcpu dirty ring is soft-full */ 10141 if (unlikely(vcpu->kvm->dirty_ring_size && 10142 kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) { 10143 vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL; 10144 trace_kvm_dirty_ring_exit(vcpu); 10145 r = 0; 10146 goto out; 10147 } 10148 10149 if (kvm_request_pending(vcpu)) { 10150 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) { 10151 r = -EIO; 10152 goto out; 10153 } 10154 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { 10155 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { 10156 r = 0; 10157 goto out; 10158 } 10159 } 10160 if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu)) 10161 kvm_mmu_free_obsolete_roots(vcpu); 10162 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) 10163 __kvm_migrate_timers(vcpu); 10164 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) 10165 kvm_update_masterclock(vcpu->kvm); 10166 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) 10167 kvm_gen_kvmclock_update(vcpu); 10168 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { 10169 r = kvm_guest_time_update(vcpu); 10170 if (unlikely(r)) 10171 goto out; 10172 } 10173 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 10174 kvm_mmu_sync_roots(vcpu); 10175 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) 10176 kvm_mmu_load_pgd(vcpu); 10177 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { 10178 kvm_vcpu_flush_tlb_all(vcpu); 10179 10180 /* Flushing all ASIDs flushes the current ASID... */ 10181 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 10182 } 10183 kvm_service_local_tlb_flush_requests(vcpu); 10184 10185 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { 10186 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; 10187 r = 0; 10188 goto out; 10189 } 10190 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 10191 if (is_guest_mode(vcpu)) { 10192 kvm_x86_ops.nested_ops->triple_fault(vcpu); 10193 } else { 10194 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 10195 vcpu->mmio_needed = 0; 10196 r = 0; 10197 goto out; 10198 } 10199 } 10200 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { 10201 /* Page is swapped out. 
Do synthetic halt */ 10202 vcpu->arch.apf.halted = true; 10203 r = 1; 10204 goto out; 10205 } 10206 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) 10207 record_steal_time(vcpu); 10208 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 10209 process_smi(vcpu); 10210 if (kvm_check_request(KVM_REQ_NMI, vcpu)) 10211 process_nmi(vcpu); 10212 if (kvm_check_request(KVM_REQ_PMU, vcpu)) 10213 kvm_pmu_handle_event(vcpu); 10214 if (kvm_check_request(KVM_REQ_PMI, vcpu)) 10215 kvm_pmu_deliver_pmi(vcpu); 10216 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { 10217 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); 10218 if (test_bit(vcpu->arch.pending_ioapic_eoi, 10219 vcpu->arch.ioapic_handled_vectors)) { 10220 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; 10221 vcpu->run->eoi.vector = 10222 vcpu->arch.pending_ioapic_eoi; 10223 r = 0; 10224 goto out; 10225 } 10226 } 10227 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 10228 vcpu_scan_ioapic(vcpu); 10229 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) 10230 vcpu_load_eoi_exitmap(vcpu); 10231 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) 10232 kvm_vcpu_reload_apic_access_page(vcpu); 10233 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { 10234 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10235 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; 10236 vcpu->run->system_event.ndata = 0; 10237 r = 0; 10238 goto out; 10239 } 10240 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { 10241 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10242 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; 10243 vcpu->run->system_event.ndata = 0; 10244 r = 0; 10245 goto out; 10246 } 10247 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { 10248 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 10249 10250 vcpu->run->exit_reason = KVM_EXIT_HYPERV; 10251 vcpu->run->hyperv = hv_vcpu->exit; 10252 r = 0; 10253 goto out; 10254 } 10255 10256 /* 10257 * KVM_REQ_HV_STIMER has to be processed after 10258 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers 10259 * depend on the guest clock being up-to-date 10260 */ 10261 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) 10262 kvm_hv_process_stimers(vcpu); 10263 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) 10264 kvm_vcpu_update_apicv(vcpu); 10265 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) 10266 kvm_check_async_pf_completion(vcpu); 10267 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) 10268 static_call(kvm_x86_msr_filter_changed)(vcpu); 10269 10270 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) 10271 static_call(kvm_x86_update_cpu_dirty_logging)(vcpu); 10272 } 10273 10274 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || 10275 kvm_xen_has_interrupt(vcpu)) { 10276 ++vcpu->stat.req_event; 10277 r = kvm_apic_accept_events(vcpu); 10278 if (r < 0) { 10279 r = 0; 10280 goto out; 10281 } 10282 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 10283 r = 1; 10284 goto out; 10285 } 10286 10287 r = inject_pending_event(vcpu, &req_immediate_exit); 10288 if (r < 0) { 10289 r = 0; 10290 goto out; 10291 } 10292 if (req_int_win) 10293 static_call(kvm_x86_enable_irq_window)(vcpu); 10294 10295 if (kvm_lapic_enabled(vcpu)) { 10296 update_cr8_intercept(vcpu); 10297 kvm_lapic_sync_to_vapic(vcpu); 10298 } 10299 } 10300 10301 r = kvm_mmu_reload(vcpu); 10302 if (unlikely(r)) { 10303 goto cancel_injection; 10304 } 10305 10306 preempt_disable(); 10307 10308 static_call(kvm_x86_prepare_switch_to_guest)(vcpu); 10309 10310 /* 10311 * Disable IRQs before setting IN_GUEST_MODE. 
Posted interrupt 10312 * IPIs are then delayed after guest entry, which ensures that they 10313 * result in virtual interrupt delivery. 10314 */ 10315 local_irq_disable(); 10316 10317 /* Store vcpu->apicv_active before vcpu->mode. */ 10318 smp_store_release(&vcpu->mode, IN_GUEST_MODE); 10319 10320 kvm_vcpu_srcu_read_unlock(vcpu); 10321 10322 /* 10323 * 1) We should set ->mode before checking ->requests. Please see 10324 * the comment in kvm_vcpu_exiting_guest_mode(). 10325 * 10326 * 2) For APICv, we should set ->mode before checking PID.ON. This 10327 * pairs with the memory barrier implicit in pi_test_and_set_on 10328 * (see vmx_deliver_posted_interrupt). 10329 * 10330 * 3) This also orders the write to mode from any reads to the page 10331 * tables done while the VCPU is running. Please see the comment 10332 * in kvm_flush_remote_tlbs. 10333 */ 10334 smp_mb__after_srcu_read_unlock(); 10335 10336 /* 10337 * Process pending posted interrupts to handle the case where the 10338 * notification IRQ arrived in the host, or was never sent (because the 10339 * target vCPU wasn't running). Do this regardless of the vCPU's APICv 10340 * status; KVM doesn't update assigned devices when APICv is inhibited, 10341 * i.e. they can post interrupts even if APICv is temporarily disabled. 10342 */ 10343 if (kvm_lapic_enabled(vcpu)) 10344 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10345 10346 if (kvm_vcpu_exit_request(vcpu)) { 10347 vcpu->mode = OUTSIDE_GUEST_MODE; 10348 smp_wmb(); 10349 local_irq_enable(); 10350 preempt_enable(); 10351 kvm_vcpu_srcu_read_lock(vcpu); 10352 r = 1; 10353 goto cancel_injection; 10354 } 10355 10356 if (req_immediate_exit) { 10357 kvm_make_request(KVM_REQ_EVENT, vcpu); 10358 static_call(kvm_x86_request_immediate_exit)(vcpu); 10359 } 10360 10361 fpregs_assert_state_consistent(); 10362 if (test_thread_flag(TIF_NEED_FPU_LOAD)) 10363 switch_fpu_return(); 10364 10365 if (vcpu->arch.guest_fpu.xfd_err) 10366 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); 10367 10368 if (unlikely(vcpu->arch.switch_db_regs)) { 10369 set_debugreg(0, 7); 10370 set_debugreg(vcpu->arch.eff_db[0], 0); 10371 set_debugreg(vcpu->arch.eff_db[1], 1); 10372 set_debugreg(vcpu->arch.eff_db[2], 2); 10373 set_debugreg(vcpu->arch.eff_db[3], 3); 10374 } else if (unlikely(hw_breakpoint_active())) { 10375 set_debugreg(0, 7); 10376 } 10377 10378 guest_timing_enter_irqoff(); 10379 10380 for (;;) { 10381 /* 10382 * Assert that vCPU vs. VM APICv state is consistent. An APICv 10383 * update must kick and wait for all vCPUs before toggling the 10384 * per-VM state, and responding vCPUs must wait for the update 10385 * to complete before servicing KVM_REQ_APICV_UPDATE. 10386 */ 10387 WARN_ON_ONCE(kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)); 10388 10389 exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu); 10390 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) 10391 break; 10392 10393 if (kvm_lapic_enabled(vcpu)) 10394 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10395 10396 if (unlikely(kvm_vcpu_exit_request(vcpu))) { 10397 exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; 10398 break; 10399 } 10400 } 10401 10402 /* 10403 * Do this here before restoring debug registers on the host. And 10404 * since we do this before handling the vmexit, a DR access vmexit 10405 * can (a) read the correct value of the debug registers, (b) set 10406 * KVM_DEBUGREG_WONT_EXIT again.
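 * sync_dirty_debug_regs() pulls the guest's debug register values out
 * of hardware so they aren't lost when the host breakpoint state is
 * restored further below.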
10407 */ 10408 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { 10409 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); 10410 static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); 10411 kvm_update_dr0123(vcpu); 10412 kvm_update_dr7(vcpu); 10413 } 10414 10415 /* 10416 * If the guest has used debug registers, at least dr7 10417 * will be disabled while returning to the host. 10418 * If we don't have active breakpoints in the host, we don't 10419 * care about the messed up debug address registers. But if 10420 * we have some of them active, restore the old state. 10421 */ 10422 if (hw_breakpoint_active()) 10423 hw_breakpoint_restore(); 10424 10425 vcpu->arch.last_vmentry_cpu = vcpu->cpu; 10426 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 10427 10428 vcpu->mode = OUTSIDE_GUEST_MODE; 10429 smp_wmb(); 10430 10431 /* 10432 * Sync xfd before calling handle_exit_irqoff() which may 10433 * rely on the fact that guest_fpu::xfd is up-to-date (e.g. 10434 * in #NM irqoff handler). 10435 */ 10436 if (vcpu->arch.xfd_no_write_intercept) 10437 fpu_sync_guest_vmexit_xfd_state(); 10438 10439 static_call(kvm_x86_handle_exit_irqoff)(vcpu); 10440 10441 if (vcpu->arch.guest_fpu.xfd_err) 10442 wrmsrl(MSR_IA32_XFD_ERR, 0); 10443 10444 /* 10445 * Consume any pending interrupts, including the possible source of 10446 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. 10447 * An instruction is required after local_irq_enable() to fully unblock 10448 * interrupts on processors that implement an interrupt shadow, the 10449 * stat.exits increment will do nicely. 10450 */ 10451 kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ); 10452 local_irq_enable(); 10453 ++vcpu->stat.exits; 10454 local_irq_disable(); 10455 kvm_after_interrupt(vcpu); 10456 10457 /* 10458 * Wait until after servicing IRQs to account guest time so that any 10459 * ticks that occurred while running the guest are properly accounted 10460 * to the guest. Waiting until IRQs are enabled degrades the accuracy 10461 * of accounting via context tracking, but the loss of accuracy is 10462 * acceptable for all known use cases. 10463 */ 10464 guest_timing_exit_irqoff(); 10465 10466 local_irq_enable(); 10467 preempt_enable(); 10468 10469 kvm_vcpu_srcu_read_lock(vcpu); 10470 10471 /* 10472 * Profile KVM exit RIPs: 10473 */ 10474 if (unlikely(prof_on == KVM_PROFILING)) { 10475 unsigned long rip = kvm_rip_read(vcpu); 10476 profile_hit(KVM_PROFILING, (void *)rip); 10477 } 10478 10479 if (unlikely(vcpu->arch.tsc_always_catchup)) 10480 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 10481 10482 if (vcpu->arch.apic_attention) 10483 kvm_lapic_sync_from_vapic(vcpu); 10484 10485 r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); 10486 return r; 10487 10488 cancel_injection: 10489 if (req_immediate_exit) 10490 kvm_make_request(KVM_REQ_EVENT, vcpu); 10491 static_call(kvm_x86_cancel_injection)(vcpu); 10492 if (unlikely(vcpu->arch.apic_attention)) 10493 kvm_lapic_sync_from_vapic(vcpu); 10494 out: 10495 return r; 10496 } 10497 10498 /* Called within kvm->srcu read side. */ 10499 static inline int vcpu_block(struct kvm_vcpu *vcpu) 10500 { 10501 bool hv_timer; 10502 10503 if (!kvm_arch_vcpu_runnable(vcpu)) { 10504 /* 10505 * Switch to the software timer before halt-polling/blocking as 10506 * the guest's timer may be a break event for the vCPU, and the 10507 * hypervisor timer runs only when the CPU is in guest mode. 10508 * Switch before halt-polling so that KVM recognizes an expired 10509 * timer before blocking. 
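 * The hypervisor timer is restored after the vCPU wakes up; see the
 * switch back below.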
10510 */ 10511 hv_timer = kvm_lapic_hv_timer_in_use(vcpu); 10512 if (hv_timer) 10513 kvm_lapic_switch_to_sw_timer(vcpu); 10514 10515 kvm_vcpu_srcu_read_unlock(vcpu); 10516 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 10517 kvm_vcpu_halt(vcpu); 10518 else 10519 kvm_vcpu_block(vcpu); 10520 kvm_vcpu_srcu_read_lock(vcpu); 10521 10522 if (hv_timer) 10523 kvm_lapic_switch_to_hv_timer(vcpu); 10524 10525 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) 10526 return 1; 10527 } 10528 10529 if (kvm_apic_accept_events(vcpu) < 0) 10530 return 0; 10531 switch(vcpu->arch.mp_state) { 10532 case KVM_MP_STATE_HALTED: 10533 case KVM_MP_STATE_AP_RESET_HOLD: 10534 vcpu->arch.pv.pv_unhalted = false; 10535 vcpu->arch.mp_state = 10536 KVM_MP_STATE_RUNNABLE; 10537 fallthrough; 10538 case KVM_MP_STATE_RUNNABLE: 10539 vcpu->arch.apf.halted = false; 10540 break; 10541 case KVM_MP_STATE_INIT_RECEIVED: 10542 break; 10543 default: 10544 return -EINTR; 10545 } 10546 return 1; 10547 } 10548 10549 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) 10550 { 10551 if (is_guest_mode(vcpu)) 10552 kvm_check_nested_events(vcpu); 10553 10554 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 10555 !vcpu->arch.apf.halted); 10556 } 10557 10558 /* Called within kvm->srcu read side. */ 10559 static int vcpu_run(struct kvm_vcpu *vcpu) 10560 { 10561 int r; 10562 10563 vcpu->arch.l1tf_flush_l1d = true; 10564 10565 for (;;) { 10566 /* 10567 * If another guest vCPU requests a PV TLB flush in the middle 10568 * of instruction emulation, the rest of the emulation could 10569 * use a stale page translation. Assume that any code after 10570 * this point can start executing an instruction. 10571 */ 10572 vcpu->arch.at_instruction_boundary = false; 10573 if (kvm_vcpu_running(vcpu)) { 10574 r = vcpu_enter_guest(vcpu); 10575 } else { 10576 r = vcpu_block(vcpu); 10577 } 10578 10579 if (r <= 0) 10580 break; 10581 10582 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu); 10583 if (kvm_xen_has_pending_events(vcpu)) 10584 kvm_xen_inject_pending_events(vcpu); 10585 10586 if (kvm_cpu_has_pending_timer(vcpu)) 10587 kvm_inject_pending_timer_irqs(vcpu); 10588 10589 if (dm_request_for_irq_injection(vcpu) && 10590 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { 10591 r = 0; 10592 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 10593 ++vcpu->stat.request_irq_exits; 10594 break; 10595 } 10596 10597 if (__xfer_to_guest_mode_work_pending()) { 10598 kvm_vcpu_srcu_read_unlock(vcpu); 10599 r = xfer_to_guest_mode_handle_work(vcpu); 10600 kvm_vcpu_srcu_read_lock(vcpu); 10601 if (r) 10602 return r; 10603 } 10604 } 10605 10606 return r; 10607 } 10608 10609 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) 10610 { 10611 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 10612 } 10613 10614 static int complete_emulated_pio(struct kvm_vcpu *vcpu) 10615 { 10616 BUG_ON(!vcpu->arch.pio.count); 10617 10618 return complete_emulated_io(vcpu); 10619 } 10620 10621 /* 10622 * Implements the following, as a state machine: 10623 * 10624 * read: 10625 * for each fragment 10626 * for each mmio piece in the fragment 10627 * write gpa, len 10628 * exit 10629 * copy data 10630 * execute insn 10631 * 10632 * write: 10633 * for each fragment 10634 * for each mmio piece in the fragment 10635 * write gpa, len 10636 * copy data 10637 * exit 10638 */ 10639 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) 10640 { 10641 struct kvm_run *run = vcpu->run; 10642 struct kvm_mmio_fragment *frag; 10643 unsigned len; 10644 10645 BUG_ON(!vcpu->mmio_needed); 10646 10647 /* 
Complete previous fragment */ 10648 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 10649 len = min(8u, frag->len); 10650 if (!vcpu->mmio_is_write) 10651 memcpy(frag->data, run->mmio.data, len); 10652 10653 if (frag->len <= 8) { 10654 /* Switch to the next fragment. */ 10655 frag++; 10656 vcpu->mmio_cur_fragment++; 10657 } else { 10658 /* Go forward to the next mmio piece. */ 10659 frag->data += len; 10660 frag->gpa += len; 10661 frag->len -= len; 10662 } 10663 10664 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 10665 vcpu->mmio_needed = 0; 10666 10667 /* FIXME: return into emulator if single-stepping. */ 10668 if (vcpu->mmio_is_write) 10669 return 1; 10670 vcpu->mmio_read_completed = 1; 10671 return complete_emulated_io(vcpu); 10672 } 10673 10674 run->exit_reason = KVM_EXIT_MMIO; 10675 run->mmio.phys_addr = frag->gpa; 10676 if (vcpu->mmio_is_write) 10677 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 10678 run->mmio.len = min(8u, frag->len); 10679 run->mmio.is_write = vcpu->mmio_is_write; 10680 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 10681 return 0; 10682 } 10683 10684 /* Swap (qemu) user FPU context for the guest FPU context. */ 10685 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 10686 { 10687 /* Exclude PKRU, it's restored separately immediately after VM-Exit. */ 10688 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true); 10689 trace_kvm_fpu(1); 10690 } 10691 10692 /* When vcpu_run ends, restore user space FPU context. */ 10693 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 10694 { 10695 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false); 10696 ++vcpu->stat.fpu_reload; 10697 trace_kvm_fpu(0); 10698 } 10699 10700 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) 10701 { 10702 struct kvm_run *kvm_run = vcpu->run; 10703 int r; 10704 10705 vcpu_load(vcpu); 10706 kvm_sigset_activate(vcpu); 10707 kvm_run->flags = 0; 10708 kvm_load_guest_fpu(vcpu); 10709 10710 kvm_vcpu_srcu_read_lock(vcpu); 10711 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { 10712 if (kvm_run->immediate_exit) { 10713 r = -EINTR; 10714 goto out; 10715 } 10716 /* 10717 * It should be impossible for the hypervisor timer to be in 10718 * use before KVM has ever run the vCPU. 
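 * Hence the WARN below rather than switching timers as vcpu_block()
 * does.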
10719 */ 10720 WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu)); 10721 10722 kvm_vcpu_srcu_read_unlock(vcpu); 10723 kvm_vcpu_block(vcpu); 10724 kvm_vcpu_srcu_read_lock(vcpu); 10725 10726 if (kvm_apic_accept_events(vcpu) < 0) { 10727 r = 0; 10728 goto out; 10729 } 10730 kvm_clear_request(KVM_REQ_UNHALT, vcpu); 10731 r = -EAGAIN; 10732 if (signal_pending(current)) { 10733 r = -EINTR; 10734 kvm_run->exit_reason = KVM_EXIT_INTR; 10735 ++vcpu->stat.signal_exits; 10736 } 10737 goto out; 10738 } 10739 10740 if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) || 10741 (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) { 10742 r = -EINVAL; 10743 goto out; 10744 } 10745 10746 if (kvm_run->kvm_dirty_regs) { 10747 r = sync_regs(vcpu); 10748 if (r != 0) 10749 goto out; 10750 } 10751 10752 /* re-sync apic's tpr */ 10753 if (!lapic_in_kernel(vcpu)) { 10754 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { 10755 r = -EINVAL; 10756 goto out; 10757 } 10758 } 10759 10760 if (unlikely(vcpu->arch.complete_userspace_io)) { 10761 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; 10762 vcpu->arch.complete_userspace_io = NULL; 10763 r = cui(vcpu); 10764 if (r <= 0) 10765 goto out; 10766 } else 10767 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); 10768 10769 if (kvm_run->immediate_exit) { 10770 r = -EINTR; 10771 goto out; 10772 } 10773 10774 r = static_call(kvm_x86_vcpu_pre_run)(vcpu); 10775 if (r <= 0) 10776 goto out; 10777 10778 r = vcpu_run(vcpu); 10779 10780 out: 10781 kvm_put_guest_fpu(vcpu); 10782 if (kvm_run->kvm_valid_regs) 10783 store_regs(vcpu); 10784 post_kvm_run_save(vcpu); 10785 kvm_vcpu_srcu_read_unlock(vcpu); 10786 10787 kvm_sigset_deactivate(vcpu); 10788 vcpu_put(vcpu); 10789 return r; 10790 } 10791 10792 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10793 { 10794 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { 10795 /* 10796 * We are here if userspace calls get_regs() in the middle of 10797 * instruction emulation. Registers state needs to be copied 10798 * back from emulation context to vcpu. 
Userspace shouldn't do 10799 * that usually, but some bad designed PV devices (vmware 10800 * backdoor interface) need this to work 10801 */ 10802 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); 10803 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 10804 } 10805 regs->rax = kvm_rax_read(vcpu); 10806 regs->rbx = kvm_rbx_read(vcpu); 10807 regs->rcx = kvm_rcx_read(vcpu); 10808 regs->rdx = kvm_rdx_read(vcpu); 10809 regs->rsi = kvm_rsi_read(vcpu); 10810 regs->rdi = kvm_rdi_read(vcpu); 10811 regs->rsp = kvm_rsp_read(vcpu); 10812 regs->rbp = kvm_rbp_read(vcpu); 10813 #ifdef CONFIG_X86_64 10814 regs->r8 = kvm_r8_read(vcpu); 10815 regs->r9 = kvm_r9_read(vcpu); 10816 regs->r10 = kvm_r10_read(vcpu); 10817 regs->r11 = kvm_r11_read(vcpu); 10818 regs->r12 = kvm_r12_read(vcpu); 10819 regs->r13 = kvm_r13_read(vcpu); 10820 regs->r14 = kvm_r14_read(vcpu); 10821 regs->r15 = kvm_r15_read(vcpu); 10822 #endif 10823 10824 regs->rip = kvm_rip_read(vcpu); 10825 regs->rflags = kvm_get_rflags(vcpu); 10826 } 10827 10828 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10829 { 10830 vcpu_load(vcpu); 10831 __get_regs(vcpu, regs); 10832 vcpu_put(vcpu); 10833 return 0; 10834 } 10835 10836 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10837 { 10838 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; 10839 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 10840 10841 kvm_rax_write(vcpu, regs->rax); 10842 kvm_rbx_write(vcpu, regs->rbx); 10843 kvm_rcx_write(vcpu, regs->rcx); 10844 kvm_rdx_write(vcpu, regs->rdx); 10845 kvm_rsi_write(vcpu, regs->rsi); 10846 kvm_rdi_write(vcpu, regs->rdi); 10847 kvm_rsp_write(vcpu, regs->rsp); 10848 kvm_rbp_write(vcpu, regs->rbp); 10849 #ifdef CONFIG_X86_64 10850 kvm_r8_write(vcpu, regs->r8); 10851 kvm_r9_write(vcpu, regs->r9); 10852 kvm_r10_write(vcpu, regs->r10); 10853 kvm_r11_write(vcpu, regs->r11); 10854 kvm_r12_write(vcpu, regs->r12); 10855 kvm_r13_write(vcpu, regs->r13); 10856 kvm_r14_write(vcpu, regs->r14); 10857 kvm_r15_write(vcpu, regs->r15); 10858 #endif 10859 10860 kvm_rip_write(vcpu, regs->rip); 10861 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); 10862 10863 vcpu->arch.exception.pending = false; 10864 10865 kvm_make_request(KVM_REQ_EVENT, vcpu); 10866 } 10867 10868 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10869 { 10870 vcpu_load(vcpu); 10871 __set_regs(vcpu, regs); 10872 vcpu_put(vcpu); 10873 return 0; 10874 } 10875 10876 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 10877 { 10878 struct desc_ptr dt; 10879 10880 if (vcpu->arch.guest_state_protected) 10881 goto skip_protected_regs; 10882 10883 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 10884 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 10885 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); 10886 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 10887 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 10888 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 10889 10890 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 10891 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 10892 10893 static_call(kvm_x86_get_idt)(vcpu, &dt); 10894 sregs->idt.limit = dt.size; 10895 sregs->idt.base = dt.address; 10896 static_call(kvm_x86_get_gdt)(vcpu, &dt); 10897 sregs->gdt.limit = dt.size; 10898 sregs->gdt.base = dt.address; 10899 10900 sregs->cr2 = vcpu->arch.cr2; 10901 sregs->cr3 = kvm_read_cr3(vcpu); 10902 10903 skip_protected_regs: 10904 sregs->cr0 = kvm_read_cr0(vcpu); 10905 sregs->cr4 = 
kvm_read_cr4(vcpu); 10906 sregs->cr8 = kvm_get_cr8(vcpu); 10907 sregs->efer = vcpu->arch.efer; 10908 sregs->apic_base = kvm_get_apic_base(vcpu); 10909 } 10910 10911 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 10912 { 10913 __get_sregs_common(vcpu, sregs); 10914 10915 if (vcpu->arch.guest_state_protected) 10916 return; 10917 10918 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) 10919 set_bit(vcpu->arch.interrupt.nr, 10920 (unsigned long *)sregs->interrupt_bitmap); 10921 } 10922 10923 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 10924 { 10925 int i; 10926 10927 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2); 10928 10929 if (vcpu->arch.guest_state_protected) 10930 return; 10931 10932 if (is_pae_paging(vcpu)) { 10933 for (i = 0 ; i < 4 ; i++) 10934 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); 10935 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; 10936 } 10937 } 10938 10939 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 10940 struct kvm_sregs *sregs) 10941 { 10942 vcpu_load(vcpu); 10943 __get_sregs(vcpu, sregs); 10944 vcpu_put(vcpu); 10945 return 0; 10946 } 10947 10948 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 10949 struct kvm_mp_state *mp_state) 10950 { 10951 int r; 10952 10953 vcpu_load(vcpu); 10954 if (kvm_mpx_supported()) 10955 kvm_load_guest_fpu(vcpu); 10956 10957 r = kvm_apic_accept_events(vcpu); 10958 if (r < 0) 10959 goto out; 10960 r = 0; 10961 10962 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || 10963 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && 10964 vcpu->arch.pv.pv_unhalted) 10965 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; 10966 else 10967 mp_state->mp_state = vcpu->arch.mp_state; 10968 10969 out: 10970 if (kvm_mpx_supported()) 10971 kvm_put_guest_fpu(vcpu); 10972 vcpu_put(vcpu); 10973 return r; 10974 } 10975 10976 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 10977 struct kvm_mp_state *mp_state) 10978 { 10979 int ret = -EINVAL; 10980 10981 vcpu_load(vcpu); 10982 10983 if (!lapic_in_kernel(vcpu) && 10984 mp_state->mp_state != KVM_MP_STATE_RUNNABLE) 10985 goto out; 10986 10987 /* 10988 * KVM_MP_STATE_INIT_RECEIVED means the processor is in 10989 * INIT state; latched init should be reported using 10990 * KVM_SET_VCPU_EVENTS, so reject it here. 
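 * The check below also rejects KVM_MP_STATE_SIPI_RECEIVED while INIT
 * is latched or an SMI is pending.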
10991 */ 10992 if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) && 10993 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || 10994 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) 10995 goto out; 10996 10997 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 10998 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 10999 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); 11000 } else 11001 vcpu->arch.mp_state = mp_state->mp_state; 11002 kvm_make_request(KVM_REQ_EVENT, vcpu); 11003 11004 ret = 0; 11005 out: 11006 vcpu_put(vcpu); 11007 return ret; 11008 } 11009 11010 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, 11011 int reason, bool has_error_code, u32 error_code) 11012 { 11013 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 11014 int ret; 11015 11016 init_emulate_ctxt(vcpu); 11017 11018 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, 11019 has_error_code, error_code); 11020 if (ret) { 11021 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 11022 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 11023 vcpu->run->internal.ndata = 0; 11024 return 0; 11025 } 11026 11027 kvm_rip_write(vcpu, ctxt->eip); 11028 kvm_set_rflags(vcpu, ctxt->eflags); 11029 return 1; 11030 } 11031 EXPORT_SYMBOL_GPL(kvm_task_switch); 11032 11033 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11034 { 11035 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { 11036 /* 11037 * When EFER.LME and CR0.PG are set, the processor is in 11038 * 64-bit mode (though maybe in a 32-bit code segment). 11039 * CR4.PAE and EFER.LMA must be set. 11040 */ 11041 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) 11042 return false; 11043 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) 11044 return false; 11045 } else { 11046 /* 11047 * Not in 64-bit mode: EFER.LMA is clear and the code 11048 * segment cannot be 64-bit. 
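 * i.e. reject sregs that set EFER.LMA or CS.L when EFER.LME and
 * CR0.PG are not both set.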
11049 */ 11050 if (sregs->efer & EFER_LMA || sregs->cs.l) 11051 return false; 11052 } 11053 11054 return kvm_is_valid_cr4(vcpu, sregs->cr4); 11055 } 11056 11057 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, 11058 int *mmu_reset_needed, bool update_pdptrs) 11059 { 11060 struct msr_data apic_base_msr; 11061 int idx; 11062 struct desc_ptr dt; 11063 11064 if (!kvm_is_valid_sregs(vcpu, sregs)) 11065 return -EINVAL; 11066 11067 apic_base_msr.data = sregs->apic_base; 11068 apic_base_msr.host_initiated = true; 11069 if (kvm_set_apic_base(vcpu, &apic_base_msr)) 11070 return -EINVAL; 11071 11072 if (vcpu->arch.guest_state_protected) 11073 return 0; 11074 11075 dt.size = sregs->idt.limit; 11076 dt.address = sregs->idt.base; 11077 static_call(kvm_x86_set_idt)(vcpu, &dt); 11078 dt.size = sregs->gdt.limit; 11079 dt.address = sregs->gdt.base; 11080 static_call(kvm_x86_set_gdt)(vcpu, &dt); 11081 11082 vcpu->arch.cr2 = sregs->cr2; 11083 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; 11084 vcpu->arch.cr3 = sregs->cr3; 11085 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 11086 static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); 11087 11088 kvm_set_cr8(vcpu, sregs->cr8); 11089 11090 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 11091 static_call(kvm_x86_set_efer)(vcpu, sregs->efer); 11092 11093 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 11094 static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); 11095 vcpu->arch.cr0 = sregs->cr0; 11096 11097 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 11098 static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); 11099 11100 if (update_pdptrs) { 11101 idx = srcu_read_lock(&vcpu->kvm->srcu); 11102 if (is_pae_paging(vcpu)) { 11103 load_pdptrs(vcpu, kvm_read_cr3(vcpu)); 11104 *mmu_reset_needed = 1; 11105 } 11106 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11107 } 11108 11109 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 11110 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 11111 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); 11112 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 11113 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 11114 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 11115 11116 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 11117 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 11118 11119 update_cr8_intercept(vcpu); 11120 11121 /* Older userspace won't unhalt the vcpu on reset. 
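 * If the vCPU looks like it is at the architectural RESET state
 * (RIP 0xfff0, CS selector 0xf000 with base 0xffff0000, real mode)
 * on the BSP, force it to RUNNABLE.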
*/ 11122 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && 11123 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 11124 !is_protmode(vcpu)) 11125 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 11126 11127 return 0; 11128 } 11129 11130 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11131 { 11132 int pending_vec, max_bits; 11133 int mmu_reset_needed = 0; 11134 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true); 11135 11136 if (ret) 11137 return ret; 11138 11139 if (mmu_reset_needed) 11140 kvm_mmu_reset_context(vcpu); 11141 11142 max_bits = KVM_NR_INTERRUPTS; 11143 pending_vec = find_first_bit( 11144 (const unsigned long *)sregs->interrupt_bitmap, max_bits); 11145 11146 if (pending_vec < max_bits) { 11147 kvm_queue_interrupt(vcpu, pending_vec, false); 11148 pr_debug("Set back pending irq %d\n", pending_vec); 11149 kvm_make_request(KVM_REQ_EVENT, vcpu); 11150 } 11151 return 0; 11152 } 11153 11154 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 11155 { 11156 int mmu_reset_needed = 0; 11157 bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; 11158 bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) && 11159 !(sregs2->efer & EFER_LMA); 11160 int i, ret; 11161 11162 if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID) 11163 return -EINVAL; 11164 11165 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) 11166 return -EINVAL; 11167 11168 ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2, 11169 &mmu_reset_needed, !valid_pdptrs); 11170 if (ret) 11171 return ret; 11172 11173 if (valid_pdptrs) { 11174 for (i = 0; i < 4 ; i++) 11175 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]); 11176 11177 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); 11178 mmu_reset_needed = 1; 11179 vcpu->arch.pdptrs_from_userspace = true; 11180 } 11181 if (mmu_reset_needed) 11182 kvm_mmu_reset_context(vcpu); 11183 return 0; 11184 } 11185 11186 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 11187 struct kvm_sregs *sregs) 11188 { 11189 int ret; 11190 11191 vcpu_load(vcpu); 11192 ret = __set_sregs(vcpu, sregs); 11193 vcpu_put(vcpu); 11194 return ret; 11195 } 11196 11197 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm) 11198 { 11199 bool set = false; 11200 struct kvm_vcpu *vcpu; 11201 unsigned long i; 11202 11203 if (!enable_apicv) 11204 return; 11205 11206 down_write(&kvm->arch.apicv_update_lock); 11207 11208 kvm_for_each_vcpu(i, vcpu, kvm) { 11209 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) { 11210 set = true; 11211 break; 11212 } 11213 } 11214 __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set); 11215 up_write(&kvm->arch.apicv_update_lock); 11216 } 11217 11218 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 11219 struct kvm_guest_debug *dbg) 11220 { 11221 unsigned long rflags; 11222 int i, r; 11223 11224 if (vcpu->arch.guest_state_protected) 11225 return -EINVAL; 11226 11227 vcpu_load(vcpu); 11228 11229 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { 11230 r = -EBUSY; 11231 if (vcpu->arch.exception.pending) 11232 goto out; 11233 if (dbg->control & KVM_GUESTDBG_INJECT_DB) 11234 kvm_queue_exception(vcpu, DB_VECTOR); 11235 else 11236 kvm_queue_exception(vcpu, BP_VECTOR); 11237 } 11238 11239 /* 11240 * Read rflags as long as potentially injected trace flags are still 11241 * filtered out. 
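 * kvm_get_rflags() hides X86_EFLAGS_TF while guest single-stepping is
 * active, so the value read here is the guest-visible one; it is
 * written back via kvm_set_rflags() below once guest_debug is updated.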
11242 */ 11243 rflags = kvm_get_rflags(vcpu); 11244 11245 vcpu->guest_debug = dbg->control; 11246 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) 11247 vcpu->guest_debug = 0; 11248 11249 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 11250 for (i = 0; i < KVM_NR_DB_REGS; ++i) 11251 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; 11252 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; 11253 } else { 11254 for (i = 0; i < KVM_NR_DB_REGS; i++) 11255 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 11256 } 11257 kvm_update_dr7(vcpu); 11258 11259 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 11260 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); 11261 11262 /* 11263 * Trigger an rflags update that will inject or remove the trace 11264 * flags. 11265 */ 11266 kvm_set_rflags(vcpu, rflags); 11267 11268 static_call(kvm_x86_update_exception_bitmap)(vcpu); 11269 11270 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm); 11271 11272 r = 0; 11273 11274 out: 11275 vcpu_put(vcpu); 11276 return r; 11277 } 11278 11279 /* 11280 * Translate a guest virtual address to a guest physical address. 11281 */ 11282 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 11283 struct kvm_translation *tr) 11284 { 11285 unsigned long vaddr = tr->linear_address; 11286 gpa_t gpa; 11287 int idx; 11288 11289 vcpu_load(vcpu); 11290 11291 idx = srcu_read_lock(&vcpu->kvm->srcu); 11292 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); 11293 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11294 tr->physical_address = gpa; 11295 tr->valid = gpa != UNMAPPED_GVA; 11296 tr->writeable = 1; 11297 tr->usermode = 0; 11298 11299 vcpu_put(vcpu); 11300 return 0; 11301 } 11302 11303 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 11304 { 11305 struct fxregs_state *fxsave; 11306 11307 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 11308 return 0; 11309 11310 vcpu_load(vcpu); 11311 11312 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; 11313 memcpy(fpu->fpr, fxsave->st_space, 128); 11314 fpu->fcw = fxsave->cwd; 11315 fpu->fsw = fxsave->swd; 11316 fpu->ftwx = fxsave->twd; 11317 fpu->last_opcode = fxsave->fop; 11318 fpu->last_ip = fxsave->rip; 11319 fpu->last_dp = fxsave->rdp; 11320 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); 11321 11322 vcpu_put(vcpu); 11323 return 0; 11324 } 11325 11326 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 11327 { 11328 struct fxregs_state *fxsave; 11329 11330 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 11331 return 0; 11332 11333 vcpu_load(vcpu); 11334 11335 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; 11336 11337 memcpy(fxsave->st_space, fpu->fpr, 128); 11338 fxsave->cwd = fpu->fcw; 11339 fxsave->swd = fpu->fsw; 11340 fxsave->twd = fpu->ftwx; 11341 fxsave->fop = fpu->last_opcode; 11342 fxsave->rip = fpu->last_ip; 11343 fxsave->rdp = fpu->last_dp; 11344 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); 11345 11346 vcpu_put(vcpu); 11347 return 0; 11348 } 11349 11350 static void store_regs(struct kvm_vcpu *vcpu) 11351 { 11352 BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES); 11353 11354 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) 11355 __get_regs(vcpu, &vcpu->run->s.regs.regs); 11356 11357 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) 11358 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); 11359 11360 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) 11361 kvm_vcpu_ioctl_x86_get_vcpu_events( 11362 vcpu, &vcpu->run->s.regs.events); 11363 } 11364 11365 static int 
sync_regs(struct kvm_vcpu *vcpu) 11366 { 11367 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { 11368 __set_regs(vcpu, &vcpu->run->s.regs.regs); 11369 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; 11370 } 11371 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { 11372 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) 11373 return -EINVAL; 11374 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; 11375 } 11376 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { 11377 if (kvm_vcpu_ioctl_x86_set_vcpu_events( 11378 vcpu, &vcpu->run->s.regs.events)) 11379 return -EINVAL; 11380 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; 11381 } 11382 11383 return 0; 11384 } 11385 11386 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) 11387 { 11388 if (kvm_check_tsc_unstable() && kvm->created_vcpus) 11389 pr_warn_once("kvm: SMP vm created on host with unstable TSC; " 11390 "guest TSC will not be reliable\n"); 11391 11392 if (!kvm->arch.max_vcpu_ids) 11393 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS; 11394 11395 if (id >= kvm->arch.max_vcpu_ids) 11396 return -EINVAL; 11397 11398 return static_call(kvm_x86_vcpu_precreate)(kvm); 11399 } 11400 11401 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) 11402 { 11403 struct page *page; 11404 int r; 11405 11406 vcpu->arch.last_vmentry_cpu = -1; 11407 vcpu->arch.regs_avail = ~0; 11408 vcpu->arch.regs_dirty = ~0; 11409 11410 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) 11411 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 11412 else 11413 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; 11414 11415 r = kvm_mmu_create(vcpu); 11416 if (r < 0) 11417 return r; 11418 11419 if (irqchip_in_kernel(vcpu->kvm)) { 11420 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); 11421 if (r < 0) 11422 goto fail_mmu_destroy; 11423 11424 /* 11425 * Defer evaluating inhibits until the vCPU is first run, as 11426 * this vCPU will not get notified of any changes until this 11427 * vCPU is visible to other vCPUs (marked online and added to 11428 * the set of vCPUs). Opportunistically mark APICv active as 11429 * VMX in particular is highly unlikely to have inhibits. 11430 * Ignore the current per-VM APICv state so that vCPU creation 11431 * is guaranteed to run with a deterministic value; the request 11432 * will ensure the vCPU gets the correct state before VM-Entry.
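 * The KVM_REQ_APICV_UPDATE request made just below is what reconciles
 * this optimistic value with the VM-wide inhibits before the first
 * VM-Enter.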
11433 */ 11434 if (enable_apicv) { 11435 vcpu->arch.apic->apicv_active = true; 11436 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); 11437 } 11438 } else 11439 static_branch_inc(&kvm_has_noapic_vcpu); 11440 11441 r = -ENOMEM; 11442 11443 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 11444 if (!page) 11445 goto fail_free_lapic; 11446 vcpu->arch.pio_data = page_address(page); 11447 11448 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, 11449 GFP_KERNEL_ACCOUNT); 11450 if (!vcpu->arch.mce_banks) 11451 goto fail_free_pio_data; 11452 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; 11453 11454 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, 11455 GFP_KERNEL_ACCOUNT)) 11456 goto fail_free_mce_banks; 11457 11458 if (!alloc_emulate_ctxt(vcpu)) 11459 goto free_wbinvd_dirty_mask; 11460 11461 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { 11462 pr_err("kvm: failed to allocate vcpu's fpu\n"); 11463 goto free_emulate_ctxt; 11464 } 11465 11466 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); 11467 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); 11468 11469 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; 11470 11471 kvm_async_pf_hash_reset(vcpu); 11472 kvm_pmu_init(vcpu); 11473 11474 vcpu->arch.pending_external_vector = -1; 11475 vcpu->arch.preempted_in_kernel = false; 11476 11477 #if IS_ENABLED(CONFIG_HYPERV) 11478 vcpu->arch.hv_root_tdp = INVALID_PAGE; 11479 #endif 11480 11481 r = static_call(kvm_x86_vcpu_create)(vcpu); 11482 if (r) 11483 goto free_guest_fpu; 11484 11485 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); 11486 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; 11487 kvm_xen_init_vcpu(vcpu); 11488 kvm_vcpu_mtrr_init(vcpu); 11489 vcpu_load(vcpu); 11490 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz); 11491 kvm_vcpu_reset(vcpu, false); 11492 kvm_init_mmu(vcpu); 11493 vcpu_put(vcpu); 11494 return 0; 11495 11496 free_guest_fpu: 11497 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); 11498 free_emulate_ctxt: 11499 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 11500 free_wbinvd_dirty_mask: 11501 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 11502 fail_free_mce_banks: 11503 kfree(vcpu->arch.mce_banks); 11504 fail_free_pio_data: 11505 free_page((unsigned long)vcpu->arch.pio_data); 11506 fail_free_lapic: 11507 kvm_free_lapic(vcpu); 11508 fail_mmu_destroy: 11509 kvm_mmu_destroy(vcpu); 11510 return r; 11511 } 11512 11513 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 11514 { 11515 struct kvm *kvm = vcpu->kvm; 11516 11517 if (mutex_lock_killable(&vcpu->mutex)) 11518 return; 11519 vcpu_load(vcpu); 11520 kvm_synchronize_tsc(vcpu, 0); 11521 vcpu_put(vcpu); 11522 11523 /* poll control enabled by default */ 11524 vcpu->arch.msr_kvm_poll_control = 1; 11525 11526 mutex_unlock(&vcpu->mutex); 11527 11528 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) 11529 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 11530 KVMCLOCK_SYNC_PERIOD); 11531 } 11532 11533 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 11534 { 11535 int idx; 11536 11537 kvmclock_reset(vcpu); 11538 11539 static_call(kvm_x86_vcpu_free)(vcpu); 11540 11541 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 11542 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 11543 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); 11544 11545 kvm_xen_destroy_vcpu(vcpu); 11546 kvm_hv_vcpu_uninit(vcpu); 11547 kvm_pmu_destroy(vcpu); 11548 kfree(vcpu->arch.mce_banks); 11549 kvm_free_lapic(vcpu); 11550 idx = srcu_read_lock(&vcpu->kvm->srcu); 11551 
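/* kvm_mmu_destroy() may reference memslot data, hence the SRCU read lock taken above. */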
kvm_mmu_destroy(vcpu); 11552 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11553 free_page((unsigned long)vcpu->arch.pio_data); 11554 kvfree(vcpu->arch.cpuid_entries); 11555 if (!lapic_in_kernel(vcpu)) 11556 static_branch_dec(&kvm_has_noapic_vcpu); 11557 } 11558 11559 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 11560 { 11561 struct kvm_cpuid_entry2 *cpuid_0x1; 11562 unsigned long old_cr0 = kvm_read_cr0(vcpu); 11563 unsigned long new_cr0; 11564 11565 /* 11566 * Several of the "set" flows, e.g. ->set_cr0(), read other registers 11567 * to handle side effects. RESET emulation hits those flows and relies 11568 * on emulated/virtualized registers, including those that are loaded 11569 * into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel 11570 * to detect improper or missing initialization. 11571 */ 11572 WARN_ON_ONCE(!init_event && 11573 (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu))); 11574 11575 kvm_lapic_reset(vcpu, init_event); 11576 11577 vcpu->arch.hflags = 0; 11578 11579 vcpu->arch.smi_pending = 0; 11580 vcpu->arch.smi_count = 0; 11581 atomic_set(&vcpu->arch.nmi_queued, 0); 11582 vcpu->arch.nmi_pending = 0; 11583 vcpu->arch.nmi_injected = false; 11584 kvm_clear_interrupt_queue(vcpu); 11585 kvm_clear_exception_queue(vcpu); 11586 11587 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); 11588 kvm_update_dr0123(vcpu); 11589 vcpu->arch.dr6 = DR6_ACTIVE_LOW; 11590 vcpu->arch.dr7 = DR7_FIXED_1; 11591 kvm_update_dr7(vcpu); 11592 11593 vcpu->arch.cr2 = 0; 11594 11595 kvm_make_request(KVM_REQ_EVENT, vcpu); 11596 vcpu->arch.apf.msr_en_val = 0; 11597 vcpu->arch.apf.msr_int_val = 0; 11598 vcpu->arch.st.msr_val = 0; 11599 11600 kvmclock_reset(vcpu); 11601 11602 kvm_clear_async_pf_completion_queue(vcpu); 11603 kvm_async_pf_hash_reset(vcpu); 11604 vcpu->arch.apf.halted = false; 11605 11606 if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) { 11607 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate; 11608 11609 /* 11610 * To avoid have the INIT path from kvm_apic_has_events() that be 11611 * called with loaded FPU and does not let userspace fix the state. 11612 */ 11613 if (init_event) 11614 kvm_put_guest_fpu(vcpu); 11615 11616 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS); 11617 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR); 11618 11619 if (init_event) 11620 kvm_load_guest_fpu(vcpu); 11621 } 11622 11623 if (!init_event) { 11624 kvm_pmu_reset(vcpu); 11625 vcpu->arch.smbase = 0x30000; 11626 11627 vcpu->arch.msr_misc_features_enables = 0; 11628 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | 11629 MSR_IA32_MISC_ENABLE_BTS_UNAVAIL; 11630 11631 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP); 11632 __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true); 11633 } 11634 11635 /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */ 11636 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); 11637 kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP); 11638 11639 /* 11640 * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon) 11641 * if no CPUID match is found. Note, it's impossible to get a match at 11642 * RESET since KVM emulates RESET before exposing the vCPU to userspace, 11643 * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry 11644 * on RESET. But, go through the motions in case that's ever remedied. 11645 */ 11646 cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1, 0); 11647 kvm_rdx_write(vcpu, cpuid_0x1 ? 
cpuid_0x1->eax : 0x600); 11648 11649 static_call(kvm_x86_vcpu_reset)(vcpu, init_event); 11650 11651 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 11652 kvm_rip_write(vcpu, 0xfff0); 11653 11654 vcpu->arch.cr3 = 0; 11655 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 11656 11657 /* 11658 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions 11659 * of Intel's SDM list CD/NW as being set on INIT, but they contradict 11660 * (or qualify) that with a footnote stating that CD/NW are preserved. 11661 */ 11662 new_cr0 = X86_CR0_ET; 11663 if (init_event) 11664 new_cr0 |= (old_cr0 & (X86_CR0_NW | X86_CR0_CD)); 11665 else 11666 new_cr0 |= X86_CR0_NW | X86_CR0_CD; 11667 11668 static_call(kvm_x86_set_cr0)(vcpu, new_cr0); 11669 static_call(kvm_x86_set_cr4)(vcpu, 0); 11670 static_call(kvm_x86_set_efer)(vcpu, 0); 11671 static_call(kvm_x86_update_exception_bitmap)(vcpu); 11672 11673 /* 11674 * On the standard CR0/CR4/EFER modification paths, there are several 11675 * complex conditions determining whether the MMU has to be reset and/or 11676 * which PCIDs have to be flushed. However, CR0.WP and the paging-related 11677 * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush 11678 * is needed anyway if CR0.PG was '1' (which can only happen for INIT, as 11679 * CR0 will be '0' prior to RESET). So we only need to check CR0.PG here. 11680 */ 11681 if (old_cr0 & X86_CR0_PG) { 11682 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 11683 kvm_mmu_reset_context(vcpu); 11684 } 11685 11686 /* 11687 * Intel's SDM states that all TLB entries are flushed on INIT. AMD's 11688 * APM states the TLBs are untouched by INIT, but it also states that 11689 * the TLBs are flushed on "External initialization of the processor." 11690 * Flush the guest TLB regardless of vendor, there is no meaningful 11691 * benefit in relying on the guest to flush the TLB immediately after 11692 * INIT. A spurious TLB flush is benign and likely negligible from a 11693 * performance perspective. 11694 */ 11695 if (init_event) 11696 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 11697 } 11698 EXPORT_SYMBOL_GPL(kvm_vcpu_reset); 11699 11700 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 11701 { 11702 struct kvm_segment cs; 11703 11704 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 11705 cs.selector = vector << 8; 11706 cs.base = vector << 12; 11707 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 11708 kvm_rip_write(vcpu, 0); 11709 } 11710 EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector); 11711 11712 int kvm_arch_hardware_enable(void) 11713 { 11714 struct kvm *kvm; 11715 struct kvm_vcpu *vcpu; 11716 unsigned long i; 11717 int ret; 11718 u64 local_tsc; 11719 u64 max_tsc = 0; 11720 bool stable, backwards_tsc = false; 11721 11722 kvm_user_return_msr_cpu_online(); 11723 ret = static_call(kvm_x86_hardware_enable)(); 11724 if (ret != 0) 11725 return ret; 11726 11727 local_tsc = rdtsc(); 11728 stable = !kvm_check_tsc_unstable(); 11729 list_for_each_entry(kvm, &vm_list, vm_list) { 11730 kvm_for_each_vcpu(i, vcpu, kvm) { 11731 if (!stable && vcpu->cpu == smp_processor_id()) 11732 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 11733 if (stable && vcpu->arch.last_host_tsc > local_tsc) { 11734 backwards_tsc = true; 11735 if (vcpu->arch.last_host_tsc > max_tsc) 11736 max_tsc = vcpu->arch.last_host_tsc; 11737 } 11738 } 11739 } 11740 11741 /* 11742 * Sometimes, even reliable TSCs go backwards. This happens on 11743 * platforms that reset TSC during suspend or hibernate actions, but 11744 * maintain synchronization. 
We must compensate. Fortunately, we can 11745 * detect that condition here, which happens early in CPU bringup, 11746 * before any KVM threads can be running. Unfortunately, we can't 11747 * bring the TSCs fully up to date with real time, as we aren't yet far 11748 * enough into CPU bringup that we know how much real time has actually 11749 * elapsed; our helper function, ktime_get_boottime_ns() will be using boot 11750 * variables that haven't been updated yet. 11751 * 11752 * So we simply find the maximum observed TSC above, then record the 11753 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, 11754 * the adjustment will be applied. Note that we accumulate 11755 * adjustments, in case multiple suspend cycles happen before some VCPU 11756 * gets a chance to run again. In the event that no KVM threads get a 11757 * chance to run, we will miss the entire elapsed period, as we'll have 11758 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may 11759 * loose cycle time. This isn't too big a deal, since the loss will be 11760 * uniform across all VCPUs (not to mention the scenario is extremely 11761 * unlikely). It is possible that a second hibernate recovery happens 11762 * much faster than a first, causing the observed TSC here to be 11763 * smaller; this would require additional padding adjustment, which is 11764 * why we set last_host_tsc to the local tsc observed here. 11765 * 11766 * N.B. - this code below runs only on platforms with reliable TSC, 11767 * as that is the only way backwards_tsc is set above. Also note 11768 * that this runs for ALL vcpus, which is not a bug; all VCPUs should 11769 * have the same delta_cyc adjustment applied if backwards_tsc 11770 * is detected. Note further, this adjustment is only done once, 11771 * as we reset last_host_tsc on all VCPUs to stop this from being 11772 * called multiple times (one for each physical CPU bringup). 11773 * 11774 * Platforms with unreliable TSCs don't have to deal with this, they 11775 * will be compensated by the logic in vcpu_load, which sets the TSC to 11776 * catchup mode. This will catchup all VCPUs to real time, but cannot 11777 * guarantee that they stay in perfect synchronization. 11778 */ 11779 if (backwards_tsc) { 11780 u64 delta_cyc = max_tsc - local_tsc; 11781 list_for_each_entry(kvm, &vm_list, vm_list) { 11782 kvm->arch.backwards_tsc_observed = true; 11783 kvm_for_each_vcpu(i, vcpu, kvm) { 11784 vcpu->arch.tsc_offset_adjustment += delta_cyc; 11785 vcpu->arch.last_host_tsc = local_tsc; 11786 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 11787 } 11788 11789 /* 11790 * We have to disable TSC offset matching.. if you were 11791 * booting a VM while issuing an S4 host suspend.... 11792 * you may have some problem. Solving this issue is 11793 * left as an exercise to the reader. 
11794 */ 11795 kvm->arch.last_tsc_nsec = 0; 11796 kvm->arch.last_tsc_write = 0; 11797 } 11798 11799 } 11800 return 0; 11801 } 11802 11803 void kvm_arch_hardware_disable(void) 11804 { 11805 static_call(kvm_x86_hardware_disable)(); 11806 drop_user_return_notifiers(); 11807 } 11808 11809 static inline void kvm_ops_update(struct kvm_x86_init_ops *ops) 11810 { 11811 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); 11812 11813 #define __KVM_X86_OP(func) \ 11814 static_call_update(kvm_x86_##func, kvm_x86_ops.func); 11815 #define KVM_X86_OP(func) \ 11816 WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func) 11817 #define KVM_X86_OP_OPTIONAL __KVM_X86_OP 11818 #define KVM_X86_OP_OPTIONAL_RET0(func) \ 11819 static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \ 11820 (void *)__static_call_return0); 11821 #include <asm/kvm-x86-ops.h> 11822 #undef __KVM_X86_OP 11823 11824 kvm_pmu_ops_update(ops->pmu_ops); 11825 } 11826 11827 int kvm_arch_hardware_setup(void *opaque) 11828 { 11829 struct kvm_x86_init_ops *ops = opaque; 11830 int r; 11831 11832 rdmsrl_safe(MSR_EFER, &host_efer); 11833 11834 if (boot_cpu_has(X86_FEATURE_XSAVES)) 11835 rdmsrl(MSR_IA32_XSS, host_xss); 11836 11837 kvm_init_pmu_capability(); 11838 11839 r = ops->hardware_setup(); 11840 if (r != 0) 11841 return r; 11842 11843 kvm_ops_update(ops); 11844 11845 kvm_register_perf_callbacks(ops->handle_intel_pt_intr); 11846 11847 if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) 11848 kvm_caps.supported_xss = 0; 11849 11850 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f) 11851 cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_); 11852 #undef __kvm_cpu_cap_has 11853 11854 if (kvm_caps.has_tsc_control) { 11855 /* 11856 * Make sure the user can only configure tsc_khz values that 11857 * fit into a signed integer. 11858 * A min value is not calculated because it will always 11859 * be 1 on all machines. 
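		 * As a rough sketch (assuming __scale_tsc() multiplies by the
		 * ratio and shifts right by the fractional bits), the cap
		 * computed below is:
		 *
		 *   max_guest_tsc_khz = min(0x7fffffff,
		 *	(tsc_khz * max_tsc_scaling_ratio) >> tsc_scaling_ratio_frac_bits)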
11860 */ 11861 u64 max = min(0x7fffffffULL, 11862 __scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz)); 11863 kvm_caps.max_guest_tsc_khz = max; 11864 } 11865 kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits; 11866 kvm_init_msr_list(); 11867 return 0; 11868 } 11869 11870 void kvm_arch_hardware_unsetup(void) 11871 { 11872 kvm_unregister_perf_callbacks(); 11873 11874 static_call(kvm_x86_hardware_unsetup)(); 11875 } 11876 11877 int kvm_arch_check_processor_compat(void *opaque) 11878 { 11879 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); 11880 struct kvm_x86_init_ops *ops = opaque; 11881 11882 WARN_ON(!irqs_disabled()); 11883 11884 if (__cr4_reserved_bits(cpu_has, c) != 11885 __cr4_reserved_bits(cpu_has, &boot_cpu_data)) 11886 return -EIO; 11887 11888 return ops->check_processor_compatibility(); 11889 } 11890 11891 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) 11892 { 11893 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; 11894 } 11895 EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp); 11896 11897 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) 11898 { 11899 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; 11900 } 11901 11902 __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); 11903 EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); 11904 11905 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) 11906 { 11907 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 11908 11909 vcpu->arch.l1tf_flush_l1d = true; 11910 if (pmu->version && unlikely(pmu->event_count)) { 11911 pmu->need_cleanup = true; 11912 kvm_make_request(KVM_REQ_PMU, vcpu); 11913 } 11914 static_call(kvm_x86_sched_in)(vcpu, cpu); 11915 } 11916 11917 void kvm_arch_free_vm(struct kvm *kvm) 11918 { 11919 kfree(to_kvm_hv(kvm)->hv_pa_pg); 11920 __kvm_arch_free_vm(kvm); 11921 } 11922 11923 11924 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 11925 { 11926 int ret; 11927 unsigned long flags; 11928 11929 if (type) 11930 return -EINVAL; 11931 11932 ret = kvm_page_track_init(kvm); 11933 if (ret) 11934 goto out; 11935 11936 ret = kvm_mmu_init_vm(kvm); 11937 if (ret) 11938 goto out_page_track; 11939 11940 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); 11941 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 11942 atomic_set(&kvm->arch.noncoherent_dma_count, 0); 11943 11944 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 11945 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); 11946 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ 11947 set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, 11948 &kvm->arch.irq_sources_bitmap); 11949 11950 raw_spin_lock_init(&kvm->arch.tsc_write_lock); 11951 mutex_init(&kvm->arch.apic_map_lock); 11952 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock); 11953 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); 11954 11955 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 11956 pvclock_update_vm_gtod_copy(kvm); 11957 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 11958 11959 kvm->arch.default_tsc_khz = max_tsc_khz ? 
: tsc_khz; 11960 kvm->arch.guest_can_read_msr_platform_info = true; 11961 kvm->arch.enable_pmu = enable_pmu; 11962 11963 #if IS_ENABLED(CONFIG_HYPERV) 11964 spin_lock_init(&kvm->arch.hv_root_tdp_lock); 11965 kvm->arch.hv_root_tdp = INVALID_PAGE; 11966 #endif 11967 11968 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); 11969 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); 11970 11971 kvm_apicv_init(kvm); 11972 kvm_hv_init_vm(kvm); 11973 kvm_xen_init_vm(kvm); 11974 11975 return static_call(kvm_x86_vm_init)(kvm); 11976 11977 out_page_track: 11978 kvm_page_track_cleanup(kvm); 11979 out: 11980 return ret; 11981 } 11982 11983 int kvm_arch_post_init_vm(struct kvm *kvm) 11984 { 11985 return kvm_mmu_post_init_vm(kvm); 11986 } 11987 11988 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) 11989 { 11990 vcpu_load(vcpu); 11991 kvm_mmu_unload(vcpu); 11992 vcpu_put(vcpu); 11993 } 11994 11995 static void kvm_unload_vcpu_mmus(struct kvm *kvm) 11996 { 11997 unsigned long i; 11998 struct kvm_vcpu *vcpu; 11999 12000 kvm_for_each_vcpu(i, vcpu, kvm) { 12001 kvm_clear_async_pf_completion_queue(vcpu); 12002 kvm_unload_vcpu_mmu(vcpu); 12003 } 12004 } 12005 12006 void kvm_arch_sync_events(struct kvm *kvm) 12007 { 12008 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); 12009 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); 12010 kvm_free_pit(kvm); 12011 } 12012 12013 /** 12014 * __x86_set_memory_region: Setup KVM internal memory slot 12015 * 12016 * @kvm: the kvm pointer to the VM. 12017 * @id: the slot ID to setup. 12018 * @gpa: the GPA to install the slot (unused when @size == 0). 12019 * @size: the size of the slot. Set to zero to uninstall a slot. 12020 * 12021 * This function helps to setup a KVM internal memory slot. Specify 12022 * @size > 0 to install a new slot, while @size == 0 to uninstall a 12023 * slot. The return code can be one of the following: 12024 * 12025 * HVA: on success (uninstall will return a bogus HVA) 12026 * -errno: on error 12027 * 12028 * The caller should always use IS_ERR() to check the return value 12029 * before use. Note, the KVM internal memory slots are guaranteed to 12030 * remain valid and unchanged until the VM is destroyed, i.e., the 12031 * GPA->HVA translation will not change. However, the HVA is a user 12032 * address, i.e. its accessibility is not guaranteed, and must be 12033 * accessed via __copy_{to,from}_user(). 12034 */ 12035 void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, 12036 u32 size) 12037 { 12038 int i, r; 12039 unsigned long hva, old_npages; 12040 struct kvm_memslots *slots = kvm_memslots(kvm); 12041 struct kvm_memory_slot *slot; 12042 12043 /* Called with kvm->slots_lock held. */ 12044 if (WARN_ON(id >= KVM_MEM_SLOTS_NUM)) 12045 return ERR_PTR_USR(-EINVAL); 12046 12047 slot = id_to_memslot(slots, id); 12048 if (size) { 12049 if (slot && slot->npages) 12050 return ERR_PTR_USR(-EEXIST); 12051 12052 /* 12053 * MAP_SHARED to prevent internal slot pages from being moved 12054 * by fork()/COW. 
12055 */ 12056 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, 12057 MAP_SHARED | MAP_ANONYMOUS, 0); 12058 if (IS_ERR((void *)hva)) 12059 return (void __user *)hva; 12060 } else { 12061 if (!slot || !slot->npages) 12062 return NULL; 12063 12064 old_npages = slot->npages; 12065 hva = slot->userspace_addr; 12066 } 12067 12068 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 12069 struct kvm_userspace_memory_region m; 12070 12071 m.slot = id | (i << 16); 12072 m.flags = 0; 12073 m.guest_phys_addr = gpa; 12074 m.userspace_addr = hva; 12075 m.memory_size = size; 12076 r = __kvm_set_memory_region(kvm, &m); 12077 if (r < 0) 12078 return ERR_PTR_USR(r); 12079 } 12080 12081 if (!size) 12082 vm_munmap(hva, old_npages * PAGE_SIZE); 12083 12084 return (void __user *)hva; 12085 } 12086 EXPORT_SYMBOL_GPL(__x86_set_memory_region); 12087 12088 void kvm_arch_pre_destroy_vm(struct kvm *kvm) 12089 { 12090 kvm_mmu_pre_destroy_vm(kvm); 12091 } 12092 12093 void kvm_arch_destroy_vm(struct kvm *kvm) 12094 { 12095 if (current->mm == kvm->mm) { 12096 /* 12097 * Free memory regions allocated on behalf of userspace, 12098 * unless the memory map has changed due to process exit 12099 * or fd copying. 12100 */ 12101 mutex_lock(&kvm->slots_lock); 12102 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 12103 0, 0); 12104 __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 12105 0, 0); 12106 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); 12107 mutex_unlock(&kvm->slots_lock); 12108 } 12109 kvm_unload_vcpu_mmus(kvm); 12110 static_call_cond(kvm_x86_vm_destroy)(kvm); 12111 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); 12112 kvm_pic_destroy(kvm); 12113 kvm_ioapic_destroy(kvm); 12114 kvm_destroy_vcpus(kvm); 12115 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 12116 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); 12117 kvm_mmu_uninit_vm(kvm); 12118 kvm_page_track_cleanup(kvm); 12119 kvm_xen_destroy_vm(kvm); 12120 kvm_hv_destroy_vm(kvm); 12121 } 12122 12123 static void memslot_rmap_free(struct kvm_memory_slot *slot) 12124 { 12125 int i; 12126 12127 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 12128 kvfree(slot->arch.rmap[i]); 12129 slot->arch.rmap[i] = NULL; 12130 } 12131 } 12132 12133 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 12134 { 12135 int i; 12136 12137 memslot_rmap_free(slot); 12138 12139 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12140 kvfree(slot->arch.lpage_info[i - 1]); 12141 slot->arch.lpage_info[i - 1] = NULL; 12142 } 12143 12144 kvm_page_track_free_memslot(slot); 12145 } 12146 12147 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages) 12148 { 12149 const int sz = sizeof(*slot->arch.rmap[0]); 12150 int i; 12151 12152 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 12153 int level = i + 1; 12154 int lpages = __kvm_mmu_slot_lpages(slot, npages, level); 12155 12156 if (slot->arch.rmap[i]) 12157 continue; 12158 12159 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT); 12160 if (!slot->arch.rmap[i]) { 12161 memslot_rmap_free(slot); 12162 return -ENOMEM; 12163 } 12164 } 12165 12166 return 0; 12167 } 12168 12169 static int kvm_alloc_memslot_metadata(struct kvm *kvm, 12170 struct kvm_memory_slot *slot) 12171 { 12172 unsigned long npages = slot->npages; 12173 int i, r; 12174 12175 /* 12176 * Clear out the previous array pointers for the KVM_MR_MOVE case. 
The 12177 * old arrays will be freed by __kvm_set_memory_region() if installing 12178 * the new memslot is successful. 12179 */ 12180 memset(&slot->arch, 0, sizeof(slot->arch)); 12181 12182 if (kvm_memslots_have_rmaps(kvm)) { 12183 r = memslot_rmap_alloc(slot, npages); 12184 if (r) 12185 return r; 12186 } 12187 12188 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12189 struct kvm_lpage_info *linfo; 12190 unsigned long ugfn; 12191 int lpages; 12192 int level = i + 1; 12193 12194 lpages = __kvm_mmu_slot_lpages(slot, npages, level); 12195 12196 linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT); 12197 if (!linfo) 12198 goto out_free; 12199 12200 slot->arch.lpage_info[i - 1] = linfo; 12201 12202 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 12203 linfo[0].disallow_lpage = 1; 12204 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 12205 linfo[lpages - 1].disallow_lpage = 1; 12206 ugfn = slot->userspace_addr >> PAGE_SHIFT; 12207 /* 12208 * If the gfn and userspace address are not aligned wrt each 12209 * other, disable large page support for this slot. 12210 */ 12211 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { 12212 unsigned long j; 12213 12214 for (j = 0; j < lpages; ++j) 12215 linfo[j].disallow_lpage = 1; 12216 } 12217 } 12218 12219 if (kvm_page_track_create_memslot(kvm, slot, npages)) 12220 goto out_free; 12221 12222 return 0; 12223 12224 out_free: 12225 memslot_rmap_free(slot); 12226 12227 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12228 kvfree(slot->arch.lpage_info[i - 1]); 12229 slot->arch.lpage_info[i - 1] = NULL; 12230 } 12231 return -ENOMEM; 12232 } 12233 12234 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) 12235 { 12236 struct kvm_vcpu *vcpu; 12237 unsigned long i; 12238 12239 /* 12240 * memslots->generation has been incremented. 12241 * mmio generation may have reached its maximum value. 12242 */ 12243 kvm_mmu_invalidate_mmio_sptes(kvm, gen); 12244 12245 /* Force re-initialization of steal_time cache */ 12246 kvm_for_each_vcpu(i, vcpu, kvm) 12247 kvm_vcpu_kick(vcpu); 12248 } 12249 12250 int kvm_arch_prepare_memory_region(struct kvm *kvm, 12251 const struct kvm_memory_slot *old, 12252 struct kvm_memory_slot *new, 12253 enum kvm_mr_change change) 12254 { 12255 if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) { 12256 if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn()) 12257 return -EINVAL; 12258 12259 return kvm_alloc_memslot_metadata(kvm, new); 12260 } 12261 12262 if (change == KVM_MR_FLAGS_ONLY) 12263 memcpy(&new->arch, &old->arch, sizeof(old->arch)); 12264 else if (WARN_ON_ONCE(change != KVM_MR_DELETE)) 12265 return -EIO; 12266 12267 return 0; 12268 } 12269 12270 12271 static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable) 12272 { 12273 struct kvm_arch *ka = &kvm->arch; 12274 12275 if (!kvm_x86_ops.cpu_dirty_log_size) 12276 return; 12277 12278 if ((enable && ++ka->cpu_dirty_logging_count == 1) || 12279 (!enable && --ka->cpu_dirty_logging_count == 0)) 12280 kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING); 12281 12282 WARN_ON_ONCE(ka->cpu_dirty_logging_count < 0); 12283 } 12284 12285 static void kvm_mmu_slot_apply_flags(struct kvm *kvm, 12286 struct kvm_memory_slot *old, 12287 const struct kvm_memory_slot *new, 12288 enum kvm_mr_change change) 12289 { 12290 u32 old_flags = old ? old->flags : 0; 12291 u32 new_flags = new ? 
new->flags : 0; 12292 bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES; 12293 12294 /* 12295 * Update CPU dirty logging if dirty logging is being toggled. This 12296 * applies to all operations. 12297 */ 12298 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) 12299 kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages); 12300 12301 /* 12302 * Nothing more to do for RO slots (which can't be dirtied and can't be 12303 * made writable) or CREATE/MOVE/DELETE of a slot. 12304 * 12305 * For a memslot with dirty logging disabled: 12306 * CREATE: No dirty mappings will already exist. 12307 * MOVE/DELETE: The old mappings will already have been cleaned up by 12308 * kvm_arch_flush_shadow_memslot() 12309 * 12310 * For a memslot with dirty logging enabled: 12311 * CREATE: No shadow pages exist, thus nothing to write-protect 12312 * and no dirty bits to clear. 12313 * MOVE/DELETE: The old mappings will already have been cleaned up by 12314 * kvm_arch_flush_shadow_memslot(). 12315 */ 12316 if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY)) 12317 return; 12318 12319 /* 12320 * READONLY and non-flags changes were filtered out above, and the only 12321 * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty 12322 * logging isn't being toggled on or off. 12323 */ 12324 if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES))) 12325 return; 12326 12327 if (!log_dirty_pages) { 12328 /* 12329 * Dirty logging tracks sptes in 4k granularity, meaning that 12330 * large sptes have to be split. If live migration succeeds, 12331 * the guest in the source machine will be destroyed and large 12332 * sptes will be created in the destination. However, if the 12333 * guest continues to run in the source machine (for example if 12334 * live migration fails), small sptes will remain around and 12335 * cause bad performance. 12336 * 12337 * Scan sptes if dirty logging has been stopped, dropping those 12338 * which can be collapsed into a single large-page spte. Later 12339 * page faults will create the large-page sptes. 12340 */ 12341 kvm_mmu_zap_collapsible_sptes(kvm, new); 12342 } else { 12343 /* 12344 * Initially-all-set does not require write protecting any page, 12345 * because they're all assumed to be dirty. 12346 */ 12347 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 12348 return; 12349 12350 if (READ_ONCE(eager_page_split)) 12351 kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K); 12352 12353 if (kvm_x86_ops.cpu_dirty_log_size) { 12354 kvm_mmu_slot_leaf_clear_dirty(kvm, new); 12355 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M); 12356 } else { 12357 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K); 12358 } 12359 } 12360 } 12361 12362 void kvm_arch_commit_memory_region(struct kvm *kvm, 12363 struct kvm_memory_slot *old, 12364 const struct kvm_memory_slot *new, 12365 enum kvm_mr_change change) 12366 { 12367 if (!kvm->arch.n_requested_mmu_pages && 12368 (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) { 12369 unsigned long nr_mmu_pages; 12370 12371 nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO; 12372 nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); 12373 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); 12374 } 12375 12376 kvm_mmu_slot_apply_flags(kvm, old, new, change); 12377 12378 /* Free the arrays associated with the old memslot. 
*/ 12379 if (change == KVM_MR_MOVE) 12380 kvm_arch_free_memslot(kvm, old); 12381 } 12382 12383 void kvm_arch_flush_shadow_all(struct kvm *kvm) 12384 { 12385 kvm_mmu_zap_all(kvm); 12386 } 12387 12388 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 12389 struct kvm_memory_slot *slot) 12390 { 12391 kvm_page_track_flush_slot(kvm, slot); 12392 } 12393 12394 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) 12395 { 12396 return (is_guest_mode(vcpu) && 12397 static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); 12398 } 12399 12400 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) 12401 { 12402 if (!list_empty_careful(&vcpu->async_pf.done)) 12403 return true; 12404 12405 if (kvm_apic_has_events(vcpu)) 12406 return true; 12407 12408 if (vcpu->arch.pv.pv_unhalted) 12409 return true; 12410 12411 if (vcpu->arch.exception.pending) 12412 return true; 12413 12414 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 12415 (vcpu->arch.nmi_pending && 12416 static_call(kvm_x86_nmi_allowed)(vcpu, false))) 12417 return true; 12418 12419 if (kvm_test_request(KVM_REQ_SMI, vcpu) || 12420 (vcpu->arch.smi_pending && 12421 static_call(kvm_x86_smi_allowed)(vcpu, false))) 12422 return true; 12423 12424 if (kvm_arch_interrupt_allowed(vcpu) && 12425 (kvm_cpu_has_interrupt(vcpu) || 12426 kvm_guest_apic_has_interrupt(vcpu))) 12427 return true; 12428 12429 if (kvm_hv_has_stimer_pending(vcpu)) 12430 return true; 12431 12432 if (is_guest_mode(vcpu) && 12433 kvm_x86_ops.nested_ops->hv_timer_pending && 12434 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) 12435 return true; 12436 12437 if (kvm_xen_has_pending_events(vcpu)) 12438 return true; 12439 12440 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) 12441 return true; 12442 12443 return false; 12444 } 12445 12446 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 12447 { 12448 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); 12449 } 12450 12451 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 12452 { 12453 if (kvm_vcpu_apicv_active(vcpu) && 12454 static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) 12455 return true; 12456 12457 return false; 12458 } 12459 12460 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 12461 { 12462 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) 12463 return true; 12464 12465 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 12466 kvm_test_request(KVM_REQ_SMI, vcpu) || 12467 kvm_test_request(KVM_REQ_EVENT, vcpu)) 12468 return true; 12469 12470 return kvm_arch_dy_has_pending_interrupt(vcpu); 12471 } 12472 12473 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) 12474 { 12475 if (vcpu->arch.guest_state_protected) 12476 return true; 12477 12478 return vcpu->arch.preempted_in_kernel; 12479 } 12480 12481 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) 12482 { 12483 return kvm_rip_read(vcpu); 12484 } 12485 12486 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 12487 { 12488 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; 12489 } 12490 12491 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) 12492 { 12493 return static_call(kvm_x86_interrupt_allowed)(vcpu, false); 12494 } 12495 12496 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) 12497 { 12498 /* Can't read the RIP when guest state is protected, just return 0 */ 12499 if (vcpu->arch.guest_state_protected) 12500 return 0; 12501 12502 if (is_64_bit_mode(vcpu)) 12503 return kvm_rip_read(vcpu); 12504 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + 12505 kvm_rip_read(vcpu)); 12506 } 12507 EXPORT_SYMBOL_GPL(kvm_get_linear_rip); 
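/*
 * Illustrative usage, mirroring the single-step handling in __kvm_set_rflags()
 * below and kvm_arch_vcpu_ioctl_set_guest_debug(): record the linear RIP when
 * single-stepping is armed, then only keep TF set while the vCPU is still
 * sitting on that instruction:
 *
 *	vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
 *	...
 *	if (kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
 *		rflags |= X86_EFLAGS_TF;
 */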
12508 12509 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) 12510 { 12511 return kvm_get_linear_rip(vcpu) == linear_rip; 12512 } 12513 EXPORT_SYMBOL_GPL(kvm_is_linear_rip); 12514 12515 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) 12516 { 12517 unsigned long rflags; 12518 12519 rflags = static_call(kvm_x86_get_rflags)(vcpu); 12520 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 12521 rflags &= ~X86_EFLAGS_TF; 12522 return rflags; 12523 } 12524 EXPORT_SYMBOL_GPL(kvm_get_rflags); 12525 12526 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 12527 { 12528 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && 12529 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) 12530 rflags |= X86_EFLAGS_TF; 12531 static_call(kvm_x86_set_rflags)(vcpu, rflags); 12532 } 12533 12534 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 12535 { 12536 __kvm_set_rflags(vcpu, rflags); 12537 kvm_make_request(KVM_REQ_EVENT, vcpu); 12538 } 12539 EXPORT_SYMBOL_GPL(kvm_set_rflags); 12540 12541 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) 12542 { 12543 BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU)); 12544 12545 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); 12546 } 12547 12548 static inline u32 kvm_async_pf_next_probe(u32 key) 12549 { 12550 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); 12551 } 12552 12553 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12554 { 12555 u32 key = kvm_async_pf_hash_fn(gfn); 12556 12557 while (vcpu->arch.apf.gfns[key] != ~0) 12558 key = kvm_async_pf_next_probe(key); 12559 12560 vcpu->arch.apf.gfns[key] = gfn; 12561 } 12562 12563 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) 12564 { 12565 int i; 12566 u32 key = kvm_async_pf_hash_fn(gfn); 12567 12568 for (i = 0; i < ASYNC_PF_PER_VCPU && 12569 (vcpu->arch.apf.gfns[key] != gfn && 12570 vcpu->arch.apf.gfns[key] != ~0); i++) 12571 key = kvm_async_pf_next_probe(key); 12572 12573 return key; 12574 } 12575 12576 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12577 { 12578 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; 12579 } 12580 12581 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12582 { 12583 u32 i, j, k; 12584 12585 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); 12586 12587 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) 12588 return; 12589 12590 while (true) { 12591 vcpu->arch.apf.gfns[i] = ~0; 12592 do { 12593 j = kvm_async_pf_next_probe(j); 12594 if (vcpu->arch.apf.gfns[j] == ~0) 12595 return; 12596 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); 12597 /* 12598 * k lies cyclically in ]i,j] 12599 * | i.k.j | 12600 * |....j i.k.| or |.k..j i...| 12601 */ 12602 } while ((i <= j) ? 
(i < k && k <= j) : (i < k || k <= j)); 12603 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; 12604 i = j; 12605 } 12606 } 12607 12608 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu) 12609 { 12610 u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT; 12611 12612 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, 12613 sizeof(reason)); 12614 } 12615 12616 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token) 12617 { 12618 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token); 12619 12620 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, 12621 &token, offset, sizeof(token)); 12622 } 12623 12624 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu) 12625 { 12626 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token); 12627 u32 val; 12628 12629 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, 12630 &val, offset, sizeof(val))) 12631 return false; 12632 12633 return !val; 12634 } 12635 12636 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) 12637 { 12638 12639 if (!kvm_pv_async_pf_enabled(vcpu)) 12640 return false; 12641 12642 if (vcpu->arch.apf.send_user_only && 12643 static_call(kvm_x86_get_cpl)(vcpu) == 0) 12644 return false; 12645 12646 if (is_guest_mode(vcpu)) { 12647 /* 12648 * L1 needs to opt into the special #PF vmexits that are 12649 * used to deliver async page faults. 12650 */ 12651 return vcpu->arch.apf.delivery_as_pf_vmexit; 12652 } else { 12653 /* 12654 * Play it safe in case the guest temporarily disables paging. 12655 * The real mode IDT in particular is unlikely to have a #PF 12656 * exception setup. 12657 */ 12658 return is_paging(vcpu); 12659 } 12660 } 12661 12662 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) 12663 { 12664 if (unlikely(!lapic_in_kernel(vcpu) || 12665 kvm_event_needs_reinjection(vcpu) || 12666 vcpu->arch.exception.pending)) 12667 return false; 12668 12669 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) 12670 return false; 12671 12672 /* 12673 * If interrupts are off we cannot even use an artificial 12674 * halt state. 12675 */ 12676 return kvm_arch_interrupt_allowed(vcpu); 12677 } 12678 12679 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 12680 struct kvm_async_pf *work) 12681 { 12682 struct x86_exception fault; 12683 12684 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); 12685 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); 12686 12687 if (kvm_can_deliver_async_pf(vcpu) && 12688 !apf_put_user_notpresent(vcpu)) { 12689 fault.vector = PF_VECTOR; 12690 fault.error_code_valid = true; 12691 fault.error_code = 0; 12692 fault.nested_page_fault = false; 12693 fault.address = work->arch.token; 12694 fault.async_page_fault = true; 12695 kvm_inject_page_fault(vcpu, &fault); 12696 return true; 12697 } else { 12698 /* 12699 * It is not possible to deliver a paravirtualized asynchronous 12700 * page fault, but putting the guest in an artificial halt state 12701 * can be beneficial nevertheless: if an interrupt arrives, we 12702 * can deliver it timely and perhaps the guest will schedule 12703 * another process. When the instruction that triggered a page 12704 * fault is retried, hopefully the page will be ready in the host. 
12705 */ 12706 kvm_make_request(KVM_REQ_APF_HALT, vcpu); 12707 return false; 12708 } 12709 } 12710 12711 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 12712 struct kvm_async_pf *work) 12713 { 12714 struct kvm_lapic_irq irq = { 12715 .delivery_mode = APIC_DM_FIXED, 12716 .vector = vcpu->arch.apf.vec 12717 }; 12718 12719 if (work->wakeup_all) 12720 work->arch.token = ~0; /* broadcast wakeup */ 12721 else 12722 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); 12723 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); 12724 12725 if ((work->wakeup_all || work->notpresent_injected) && 12726 kvm_pv_async_pf_enabled(vcpu) && 12727 !apf_put_user_ready(vcpu, work->arch.token)) { 12728 vcpu->arch.apf.pageready_pending = true; 12729 kvm_apic_set_irq(vcpu, &irq, NULL); 12730 } 12731 12732 vcpu->arch.apf.halted = false; 12733 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 12734 } 12735 12736 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) 12737 { 12738 kvm_make_request(KVM_REQ_APF_READY, vcpu); 12739 if (!vcpu->arch.apf.pageready_pending) 12740 kvm_vcpu_kick(vcpu); 12741 } 12742 12743 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) 12744 { 12745 if (!kvm_pv_async_pf_enabled(vcpu)) 12746 return true; 12747 else 12748 return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); 12749 } 12750 12751 void kvm_arch_start_assignment(struct kvm *kvm) 12752 { 12753 if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1) 12754 static_call_cond(kvm_x86_pi_start_assignment)(kvm); 12755 } 12756 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment); 12757 12758 void kvm_arch_end_assignment(struct kvm *kvm) 12759 { 12760 atomic_dec(&kvm->arch.assigned_device_count); 12761 } 12762 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment); 12763 12764 bool kvm_arch_has_assigned_device(struct kvm *kvm) 12765 { 12766 return atomic_read(&kvm->arch.assigned_device_count); 12767 } 12768 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); 12769 12770 void kvm_arch_register_noncoherent_dma(struct kvm *kvm) 12771 { 12772 atomic_inc(&kvm->arch.noncoherent_dma_count); 12773 } 12774 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma); 12775 12776 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) 12777 { 12778 atomic_dec(&kvm->arch.noncoherent_dma_count); 12779 } 12780 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma); 12781 12782 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) 12783 { 12784 return atomic_read(&kvm->arch.noncoherent_dma_count); 12785 } 12786 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma); 12787 12788 bool kvm_arch_has_irq_bypass(void) 12789 { 12790 return true; 12791 } 12792 12793 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, 12794 struct irq_bypass_producer *prod) 12795 { 12796 struct kvm_kernel_irqfd *irqfd = 12797 container_of(cons, struct kvm_kernel_irqfd, consumer); 12798 int ret; 12799 12800 irqfd->producer = prod; 12801 kvm_arch_start_assignment(irqfd->kvm); 12802 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, 12803 prod->irq, irqfd->gsi, 1); 12804 12805 if (ret) 12806 kvm_arch_end_assignment(irqfd->kvm); 12807 12808 return ret; 12809 } 12810 12811 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, 12812 struct irq_bypass_producer *prod) 12813 { 12814 int ret; 12815 struct kvm_kernel_irqfd *irqfd = 12816 container_of(cons, struct kvm_kernel_irqfd, consumer); 12817 12818 WARN_ON(irqfd->producer != prod); 12819 irqfd->producer = NULL; 12820 12821 /* 12822 * When producer of consumer is unregistered, we change 
back to 12823 * remapped mode, so we can re-use the current implementation 12824 * when the irq is masked/disabled or the consumer side (KVM 12825 * int this case doesn't want to receive the interrupts. 12826 */ 12827 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); 12828 if (ret) 12829 printk(KERN_INFO "irq bypass consumer (token %p) unregistration" 12830 " fails: %d\n", irqfd->consumer.token, ret); 12831 12832 kvm_arch_end_assignment(irqfd->kvm); 12833 } 12834 12835 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, 12836 uint32_t guest_irq, bool set) 12837 { 12838 return static_call(kvm_x86_pi_update_irte)(kvm, host_irq, guest_irq, set); 12839 } 12840 12841 bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, 12842 struct kvm_kernel_irq_routing_entry *new) 12843 { 12844 if (new->type != KVM_IRQ_ROUTING_MSI) 12845 return true; 12846 12847 return !!memcmp(&old->msi, &new->msi, sizeof(new->msi)); 12848 } 12849 12850 bool kvm_vector_hashing_enabled(void) 12851 { 12852 return vector_hashing; 12853 } 12854 12855 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) 12856 { 12857 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; 12858 } 12859 EXPORT_SYMBOL_GPL(kvm_arch_no_poll); 12860 12861 12862 int kvm_spec_ctrl_test_value(u64 value) 12863 { 12864 /* 12865 * test that setting IA32_SPEC_CTRL to given value 12866 * is allowed by the host processor 12867 */ 12868 12869 u64 saved_value; 12870 unsigned long flags; 12871 int ret = 0; 12872 12873 local_irq_save(flags); 12874 12875 if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value)) 12876 ret = 1; 12877 else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value)) 12878 ret = 1; 12879 else 12880 wrmsrl(MSR_IA32_SPEC_CTRL, saved_value); 12881 12882 local_irq_restore(flags); 12883 12884 return ret; 12885 } 12886 EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value); 12887 12888 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) 12889 { 12890 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 12891 struct x86_exception fault; 12892 u64 access = error_code & 12893 (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK); 12894 12895 if (!(error_code & PFERR_PRESENT_MASK) || 12896 mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != UNMAPPED_GVA) { 12897 /* 12898 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page 12899 * tables probably do not match the TLB. Just proceed 12900 * with the error code that the processor gave. 12901 */ 12902 fault.vector = PF_VECTOR; 12903 fault.error_code_valid = true; 12904 fault.error_code = error_code; 12905 fault.nested_page_fault = false; 12906 fault.address = gva; 12907 } 12908 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); 12909 } 12910 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error); 12911 12912 /* 12913 * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns 12914 * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value 12915 * indicates whether exit to userspace is needed. 
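 *
 * Typical caller pattern (illustrative; mirrors kvm_handle_invpcid() below):
 *
 *	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 *	if (r != X86EMUL_CONTINUE)
 *		return kvm_handle_memory_failure(vcpu, r, &e);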
12916 */ 12917 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, 12918 struct x86_exception *e) 12919 { 12920 if (r == X86EMUL_PROPAGATE_FAULT) { 12921 kvm_inject_emulated_page_fault(vcpu, e); 12922 return 1; 12923 } 12924 12925 /* 12926 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED 12927 * while handling a VMX instruction KVM could've handled the request 12928 * correctly by exiting to userspace and performing I/O but there 12929 * doesn't seem to be a real use-case behind such requests, just return 12930 * KVM_EXIT_INTERNAL_ERROR for now. 12931 */ 12932 kvm_prepare_emulation_failure_exit(vcpu); 12933 12934 return 0; 12935 } 12936 EXPORT_SYMBOL_GPL(kvm_handle_memory_failure); 12937 12938 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) 12939 { 12940 bool pcid_enabled; 12941 struct x86_exception e; 12942 struct { 12943 u64 pcid; 12944 u64 gla; 12945 } operand; 12946 int r; 12947 12948 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 12949 if (r != X86EMUL_CONTINUE) 12950 return kvm_handle_memory_failure(vcpu, r, &e); 12951 12952 if (operand.pcid >> 12 != 0) { 12953 kvm_inject_gp(vcpu, 0); 12954 return 1; 12955 } 12956 12957 pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); 12958 12959 switch (type) { 12960 case INVPCID_TYPE_INDIV_ADDR: 12961 if ((!pcid_enabled && (operand.pcid != 0)) || 12962 is_noncanonical_address(operand.gla, vcpu)) { 12963 kvm_inject_gp(vcpu, 0); 12964 return 1; 12965 } 12966 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); 12967 return kvm_skip_emulated_instruction(vcpu); 12968 12969 case INVPCID_TYPE_SINGLE_CTXT: 12970 if (!pcid_enabled && (operand.pcid != 0)) { 12971 kvm_inject_gp(vcpu, 0); 12972 return 1; 12973 } 12974 12975 kvm_invalidate_pcid(vcpu, operand.pcid); 12976 return kvm_skip_emulated_instruction(vcpu); 12977 12978 case INVPCID_TYPE_ALL_NON_GLOBAL: 12979 /* 12980 * Currently, KVM doesn't mark global entries in the shadow 12981 * page tables, so a non-global flush just degenerates to a 12982 * global flush. If needed, we could optimize this later by 12983 * keeping track of global entries in shadow page tables. 12984 */ 12985 12986 fallthrough; 12987 case INVPCID_TYPE_ALL_INCL_GLOBAL: 12988 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 12989 return kvm_skip_emulated_instruction(vcpu); 12990 12991 default: 12992 kvm_inject_gp(vcpu, 0); 12993 return 1; 12994 } 12995 } 12996 EXPORT_SYMBOL_GPL(kvm_handle_invpcid); 12997 12998 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu) 12999 { 13000 struct kvm_run *run = vcpu->run; 13001 struct kvm_mmio_fragment *frag; 13002 unsigned int len; 13003 13004 BUG_ON(!vcpu->mmio_needed); 13005 13006 /* Complete previous fragment */ 13007 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 13008 len = min(8u, frag->len); 13009 if (!vcpu->mmio_is_write) 13010 memcpy(frag->data, run->mmio.data, len); 13011 13012 if (frag->len <= 8) { 13013 /* Switch to the next fragment. */ 13014 frag++; 13015 vcpu->mmio_cur_fragment++; 13016 } else { 13017 /* Go forward to the next mmio piece. 
*/ 13018 frag->data += len; 13019 frag->gpa += len; 13020 frag->len -= len; 13021 } 13022 13023 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 13024 vcpu->mmio_needed = 0; 13025 13026 // VMG change, at this point, we're always done 13027 // RIP has already been advanced 13028 return 1; 13029 } 13030 13031 // More MMIO is needed 13032 run->mmio.phys_addr = frag->gpa; 13033 run->mmio.len = min(8u, frag->len); 13034 run->mmio.is_write = vcpu->mmio_is_write; 13035 if (run->mmio.is_write) 13036 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 13037 run->exit_reason = KVM_EXIT_MMIO; 13038 13039 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13040 13041 return 0; 13042 } 13043 13044 int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, 13045 void *data) 13046 { 13047 int handled; 13048 struct kvm_mmio_fragment *frag; 13049 13050 if (!data) 13051 return -EINVAL; 13052 13053 handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data); 13054 if (handled == bytes) 13055 return 1; 13056 13057 bytes -= handled; 13058 gpa += handled; 13059 data += handled; 13060 13061 /*TODO: Check if need to increment number of frags */ 13062 frag = vcpu->mmio_fragments; 13063 vcpu->mmio_nr_fragments = 1; 13064 frag->len = bytes; 13065 frag->gpa = gpa; 13066 frag->data = data; 13067 13068 vcpu->mmio_needed = 1; 13069 vcpu->mmio_cur_fragment = 0; 13070 13071 vcpu->run->mmio.phys_addr = gpa; 13072 vcpu->run->mmio.len = min(8u, frag->len); 13073 vcpu->run->mmio.is_write = 1; 13074 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 13075 vcpu->run->exit_reason = KVM_EXIT_MMIO; 13076 13077 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13078 13079 return 0; 13080 } 13081 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write); 13082 13083 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, 13084 void *data) 13085 { 13086 int handled; 13087 struct kvm_mmio_fragment *frag; 13088 13089 if (!data) 13090 return -EINVAL; 13091 13092 handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data); 13093 if (handled == bytes) 13094 return 1; 13095 13096 bytes -= handled; 13097 gpa += handled; 13098 data += handled; 13099 13100 /*TODO: Check if need to increment number of frags */ 13101 frag = vcpu->mmio_fragments; 13102 vcpu->mmio_nr_fragments = 1; 13103 frag->len = bytes; 13104 frag->gpa = gpa; 13105 frag->data = data; 13106 13107 vcpu->mmio_needed = 1; 13108 vcpu->mmio_cur_fragment = 0; 13109 13110 vcpu->run->mmio.phys_addr = gpa; 13111 vcpu->run->mmio.len = min(8u, frag->len); 13112 vcpu->run->mmio.is_write = 0; 13113 vcpu->run->exit_reason = KVM_EXIT_MMIO; 13114 13115 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13116 13117 return 0; 13118 } 13119 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read); 13120 13121 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, 13122 unsigned int port); 13123 13124 static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu) 13125 { 13126 int size = vcpu->arch.pio.size; 13127 int port = vcpu->arch.pio.port; 13128 13129 vcpu->arch.pio.count = 0; 13130 if (vcpu->arch.sev_pio_count) 13131 return kvm_sev_es_outs(vcpu, size, port); 13132 return 1; 13133 } 13134 13135 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, 13136 unsigned int port) 13137 { 13138 for (;;) { 13139 unsigned int count = 13140 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); 13141 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count); 
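		/*
		 * At most PAGE_SIZE / size items are emulated per iteration,
		 * matching the single page backing vcpu->arch.pio_data; a zero
		 * return means userspace has to complete this chunk, so defer
		 * the remainder to complete_sev_es_emulated_outs().
		 */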
13142 13143 /* memcpy done already by emulator_pio_out. */ 13144 vcpu->arch.sev_pio_count -= count; 13145 vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size; 13146 if (!ret) 13147 break; 13148 13149 /* Emulation done by the kernel. */ 13150 if (!vcpu->arch.sev_pio_count) 13151 return 1; 13152 } 13153 13154 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs; 13155 return 0; 13156 } 13157 13158 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, 13159 unsigned int port); 13160 13161 static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu) 13162 { 13163 unsigned count = vcpu->arch.pio.count; 13164 complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data); 13165 vcpu->arch.sev_pio_count -= count; 13166 vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size; 13167 } 13168 13169 static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu) 13170 { 13171 int size = vcpu->arch.pio.size; 13172 int port = vcpu->arch.pio.port; 13173 13174 advance_sev_es_emulated_ins(vcpu); 13175 if (vcpu->arch.sev_pio_count) 13176 return kvm_sev_es_ins(vcpu, size, port); 13177 return 1; 13178 } 13179 13180 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, 13181 unsigned int port) 13182 { 13183 for (;;) { 13184 unsigned int count = 13185 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); 13186 if (!__emulator_pio_in(vcpu, size, port, count)) 13187 break; 13188 13189 /* Emulation done by the kernel. */ 13190 advance_sev_es_emulated_ins(vcpu); 13191 if (!vcpu->arch.sev_pio_count) 13192 return 1; 13193 } 13194 13195 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins; 13196 return 0; 13197 } 13198 13199 int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size, 13200 unsigned int port, void *data, unsigned int count, 13201 int in) 13202 { 13203 vcpu->arch.sev_pio_data = data; 13204 vcpu->arch.sev_pio_count = count; 13205 return in ? 
kvm_sev_es_ins(vcpu, size, port) 13206 : kvm_sev_es_outs(vcpu, size, port); 13207 } 13208 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io); 13209 13210 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry); 13211 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); 13212 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio); 13213 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); 13214 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); 13215 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr); 13216 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr); 13217 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun); 13218 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit); 13219 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject); 13220 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit); 13221 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed); 13222 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga); 13223 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit); 13224 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts); 13225 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset); 13226 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update); 13227 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full); 13228 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update); 13229 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access); 13230 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi); 13231 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log); 13232 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath); 13233 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq); 13234 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter); 13235 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit); 13236 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter); 13237 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit); 13238 13239 static int __init kvm_x86_init(void) 13240 { 13241 kvm_mmu_x86_module_init(); 13242 return 0; 13243 } 13244 module_init(kvm_x86_init); 13245 13246 static void __exit kvm_x86_exit(void) 13247 { 13248 /* 13249 * If module_init() is implemented, module_exit() must also be 13250 * implemented to allow module unload. 13251 */ 13252 } 13253 module_exit(kvm_x86_exit); 13254
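/*
 * Illustrative userspace sketch (not built with KVM; vcpu_fd and mmap_size
 * are assumed to come from KVM_CREATE_VCPU and KVM_GET_VCPU_MMAP_SIZE)
 * showing the kvm_valid_regs/kvm_dirty_regs protocol that sync_regs() above
 * consumes when KVM_CAP_SYNC_REGS is available:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	run->kvm_valid_regs = KVM_SYNC_X86_REGS;  // KVM fills run->s.regs.regs
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	run->s.regs.regs.rip += 2;                // adjust guest state in place
 *	run->kvm_dirty_regs = KVM_SYNC_X86_REGS;  // sync_regs() applies it on the next run
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */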