// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"
#include "lapic.h"
#include "xen.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
#include <linux/suspend.h>

#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <asm/pkru.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <asm/sgx.h>
#include <clocksource/hyperv_timer.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32

struct kvm_caps kvm_caps __read_mostly = {
	.supported_mce_cap = MCG_CTL_P | MCG_SER_P,
};
EXPORT_SYMBOL_GPL(kvm_caps);

#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))

#define emul_to_vcpu(ctxt) \
	((struct kvm_vcpu *)(ctxt)->vcpu)

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;

#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)

#define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE

#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
				    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);

struct kvm_x86_ops kvm_x86_ops __read_mostly;

#define KVM_X86_OP(func)					     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_##func,			     \
				*(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>
EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);

static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL_GPL(report_ignored_msrs);

unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

/*
 * lapic timer advance (tscdeadline mode only) in nanoseconds.  '-1' enables
 * adaptive tuning starting from default advancement of 1000ns.  '0' disables
 * advancement entirely.  Any other value is used as-is and disables adaptive
 * tuning, i.e. allows privileged userspace to set an exact advancement time.
 */
static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);

bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, S_IRUGO);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);

/*
 * Flags to manipulate forced emulation behavior (any non-zero value will
 * enable forced emulation).
 */
#define KVM_FEP_CLEAR_RFLAGS_RF	BIT(1)
static int __read_mostly force_emulation_prefix;
module_param(force_emulation_prefix, int, 0644);

int __read_mostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);

/* Enable/disable PMU virtualization */
bool __read_mostly enable_pmu = true;
EXPORT_SYMBOL_GPL(enable_pmu);
module_param(enable_pmu, bool, 0444);

bool __read_mostly eager_page_split = true;
module_param(eager_page_split, bool, 0644);

/*
 * Restoring the host value for MSRs that are only consumed when running in
 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
 * returns to userspace, i.e. the kernel can run with the guest's value.
 */
#define KVM_MAX_NR_USER_RETURN_MSRS 16

struct kvm_user_return_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_user_return_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_MAX_NR_USER_RETURN_MSRS];
};

u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;

#define KVM_SUPPORTED_XCR0	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)

u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);

bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

bool __read_mostly enable_apicv = true;
EXPORT_SYMBOL_GPL(enable_apicv);

u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
	STATS_DESC_COUNTER(VM, mmu_pte_write),
	STATS_DESC_COUNTER(VM, mmu_pde_zapped),
	STATS_DESC_COUNTER(VM, mmu_flooded),
	STATS_DESC_COUNTER(VM, mmu_recycled),
	STATS_DESC_COUNTER(VM, mmu_cache_miss),
	STATS_DESC_ICOUNTER(VM, mmu_unsync),
	STATS_DESC_ICOUNTER(VM, pages_4k),
	STATS_DESC_ICOUNTER(VM, pages_2m),
	STATS_DESC_ICOUNTER(VM, pages_1g),
	STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
	STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
	STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, pf_taken),
	STATS_DESC_COUNTER(VCPU, pf_fixed),
	STATS_DESC_COUNTER(VCPU, pf_emulate),
	STATS_DESC_COUNTER(VCPU, pf_spurious),
	STATS_DESC_COUNTER(VCPU, pf_fast),
	STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
	STATS_DESC_COUNTER(VCPU, pf_guest),
	STATS_DESC_COUNTER(VCPU, tlb_flush),
	STATS_DESC_COUNTER(VCPU, invlpg),
	STATS_DESC_COUNTER(VCPU, exits),
	STATS_DESC_COUNTER(VCPU, io_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, irq_window_exits),
	STATS_DESC_COUNTER(VCPU, nmi_window_exits),
	STATS_DESC_COUNTER(VCPU, l1d_flush),
	STATS_DESC_COUNTER(VCPU, halt_exits),
	STATS_DESC_COUNTER(VCPU, request_irq_exits),
	STATS_DESC_COUNTER(VCPU, irq_exits),
	STATS_DESC_COUNTER(VCPU, host_state_reload),
	STATS_DESC_COUNTER(VCPU, fpu_reload),
	STATS_DESC_COUNTER(VCPU, insn_emulation),
	STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
	STATS_DESC_COUNTER(VCPU, hypercalls),
	STATS_DESC_COUNTER(VCPU, irq_injections),
	STATS_DESC_COUNTER(VCPU, nmi_injections),
	STATS_DESC_COUNTER(VCPU, req_event),
	STATS_DESC_COUNTER(VCPU, nested_run),
	STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
	STATS_DESC_COUNTER(VCPU, directed_yield_successful),
	STATS_DESC_COUNTER(VCPU, preemption_reported),
	STATS_DESC_COUNTER(VCPU, preemption_other),
	STATS_DESC_IBOOLEAN(VCPU, guest_mode),
	STATS_DESC_COUNTER(VCPU, notify_window_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

u64 __read_mostly host_xcr0;

static struct kmem_cache *x86_emulator_cache;

/*
 * When called, it means the previous get/set MSR reached an invalid MSR.
 * Return true if we want to ignore/silence this failed MSR access.
 */
static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
{
	const char *op = write ? "wrmsr" : "rdmsr";

	if (ignore_msrs) {
		if (report_ignored_msrs)
			kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		/* Mask the error */
		return true;
	} else {
		kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		return false;
	}
}

static struct kmem_cache *kvm_alloc_emulator_cache(void)
{
	unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
	unsigned int size = sizeof(struct x86_emulate_ctxt);

	return kmem_cache_create_usercopy("x86_emulator", size,
					  __alignof__(struct x86_emulate_ctxt),
					  SLAB_ACCOUNT, useroffset,
					  size - useroffset, NULL);
}

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_user_return_msrs *msrs
		= container_of(urn, struct kvm_user_return_msrs, urn);
	struct kvm_user_return_msr_values *values;
	unsigned long flags;

	/*
	 * Disabling irqs at this point since the following code could be
	 * interrupted and executed through kvm_arch_hardware_disable()
	 */
	local_irq_save(flags);
	if (msrs->registered) {
		msrs->registered = false;
		user_return_notifier_unregister(urn);
	}
	local_irq_restore(flags);
	for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
		values = &msrs->values[slot];
		if (values->host != values->curr) {
			wrmsrl(kvm_uret_msrs_list[slot], values->host);
			values->curr = values->host;
		}
	}
}

static int kvm_probe_user_return_msr(u32 msr)
{
	u64 val;
	int ret;

	preempt_disable();
	ret = rdmsrl_safe(msr, &val);
	if (ret)
		goto out;
	ret = wrmsrl_safe(msr, val);
out:
	preempt_enable();
	return ret;
}

int kvm_add_user_return_msr(u32 msr)
{
	BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);

	if (kvm_probe_user_return_msr(msr))
		return -1;

	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
	return kvm_nr_uret_msrs++;
}
EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);

int kvm_find_user_return_msr(u32 msr)
{
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		if (kvm_uret_msrs_list[i] == msr)
			return i;
	}
	return -1;
}
EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
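
/*
 * Typical use of the API above (illustrative, with a hypothetical slot
 * value and mask; see the VMX/SVM modules for the real call sites):
 *
 *	slot = kvm_add_user_return_msr(MSR_TSC_AUX);	  // once, at setup
 *	...
 *	kvm_set_user_return_msr(slot, guest_val, -1ull);  // before VM-Enter
 *
 * The notifier registered by kvm_set_user_return_msr() then restores the
 * host value lazily, only when the CPU actually returns to userspace, so
 * repeated VM-Enter/VM-Exit cycles can keep running with the guest's value.
 */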
static void kvm_user_return_msr_cpu_online(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	u64 value;
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		rdmsrl_safe(kvm_uret_msrs_list[i], &value);
		msrs->values[i].host = value;
		msrs->values[i].curr = value;
	}
}

int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	int err;

	value = (value & mask) | (msrs->values[slot].host & ~mask);
	if (value == msrs->values[slot].curr)
		return 0;
	err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
	if (err)
		return 1;

	msrs->values[slot].curr = value;
	if (!msrs->registered) {
		msrs->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&msrs->urn);
		msrs->registered = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);

static void drop_user_return_notifiers(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);

	if (msrs->registered)
		kvm_on_user_return(&msrs->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
	return kvm_apic_mode(kvm_get_apic_base(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_apic_mode);

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
	enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
	u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);

	if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
		return 1;
	if (!msr_info->host_initiated) {
		if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
			return 1;
		if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
			return 1;
	}

	kvm_lapic_set_base(vcpu, msr_info->data);
	kvm_recalculate_apic_map(vcpu->kvm);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
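
/*
 * For reference, the guest-initiated mode transitions accepted above form a
 * one-way ladder; x2APIC cannot be left except by going through DISABLED:
 *
 *	DISABLED -> XAPIC -> X2APIC	(allowed)
 *	X2APIC   -> XAPIC		(rejected)
 *	DISABLED -> X2APIC		(rejected)
 *
 * Host-initiated writes skip these checks so that userspace can restore
 * arbitrary saved state.
 */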
/*
 * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
 *
 * Hardware virtualization extension instructions may fault if a reboot turns
 * off virtualization while processes are running.  Usually after catching the
 * fault we just panic; during reboot instead the instruction is ignored.
 */
noinstr void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

#define EXCPT_FAULT		0
#define EXCPT_TRAP		1
#define EXCPT_ABORT		2
#define EXCPT_INTERRUPT		3
#define EXCPT_DB		4

static int exception_type(int vector)
{
	unsigned int mask;

	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
		return EXCPT_INTERRUPT;

	mask = 1 << vector;

	/*
	 * #DBs can be trap-like or fault-like, the caller must check other CPU
	 * state, e.g. DR6, to determine whether a #DB is a trap or fault.
	 */
	if (mask & (1 << DB_VECTOR))
		return EXCPT_DB;

	if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
		return EXCPT_TRAP;

	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
		return EXCPT_ABORT;

	/* Reserved exceptions will result in fault */
	return EXCPT_FAULT;
}
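
/*
 * For reference (SDM vol. 3, "Interrupt 8 - Double Fault Exception"), the
 * class values above encode the #DF generation rules that
 * kvm_multiple_exception() applies further down:
 *
 *	first \ second	benign		contributory	page fault
 *	benign		deliver 2nd	deliver 2nd	deliver 2nd
 *	contributory	deliver 2nd	#DF		deliver 2nd
 *	page fault	deliver 2nd	#DF		#DF
 */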
void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex)
{
	if (!ex->has_payload)
		return;

	switch (ex->vector) {
	case DB_VECTOR:
		/*
		 * "Certain debug exceptions may clear bits 0-3.  The
		 * remaining contents of the DR6 register are never
		 * cleared by the processor".
		 */
		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
		/*
		 * In order to reflect the #DB exception payload in guest
		 * dr6, three components need to be considered: active low
		 * bit, FIXED_1 bits and active high bits (e.g. DR6_BD,
		 * DR6_BS and DR6_BT).
		 * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits.
		 * In the target guest dr6:
		 * FIXED_1 bits should always be set.
		 * Active low bits should be cleared if 1-setting in payload.
		 * Active high bits should be set if 1-setting in payload.
		 *
		 * Note, the payload is compatible with the pending debug
		 * exceptions/exit qualification under VMX, that active_low
		 * bits are active high in payload.
		 * So they need to be flipped for DR6.
		 */
		vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
		vcpu->arch.dr6 |= ex->payload;
		vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;

		/*
		 * The #DB payload is defined as compatible with the 'pending
		 * debug exceptions' field under VMX, not DR6.  While bit 12 is
		 * defined in the 'pending debug exceptions' field (enabled
		 * breakpoint), it is reserved and must be zero in DR6.
		 */
		vcpu->arch.dr6 &= ~BIT(12);
		break;
	case PF_VECTOR:
		vcpu->arch.cr2 = ex->payload;
		break;
	}

	ex->has_payload = false;
	ex->payload = 0;
}
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);

static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
				       bool has_error_code, u32 error_code,
				       bool has_payload, unsigned long payload)
{
	struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;

	ex->vector = vector;
	ex->injected = false;
	ex->pending = true;
	ex->has_error_code = has_error_code;
	ex->error_code = error_code;
	ex->has_payload = has_payload;
	ex->payload = payload;
}
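
/*
 * A note on terminology, as used here: a "pending" exception has been queued
 * by KVM but not yet delivered to the guest (its payload, e.g. the CR2 value
 * for a #PF, may still need to be merged into architectural state), whereas
 * an "injected" exception was already being delivered when a VM-Exit
 * occurred and only needs to be re-injected as-is on the next VM-Entry.
 */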
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool has_payload, unsigned long payload, bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	/*
	 * If the exception is destined for L2 and isn't being reinjected,
	 * morph it to a VM-Exit if L1 wants to intercept the exception.  A
	 * previously injected exception is not checked because it was checked
	 * when it was originally queued, and re-checking is incorrect if _L1_
	 * injected the exception, in which case it's exempt from interception.
	 */
	if (!reinject && is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
		kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
					   has_payload, payload);
		return;
	}

	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
	queue:
		if (reinject) {
			/*
			 * On VM-Entry, an exception can be pending if and only
			 * if event injection was blocked by nested_run_pending.
			 * In that case, however, vcpu_enter_guest() requests an
			 * immediate exit, and the guest shouldn't proceed far
			 * enough to need reinjection.
			 */
			WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
			vcpu->arch.exception.injected = true;
			if (WARN_ON_ONCE(has_payload)) {
				/*
				 * A reinjected event has already
				 * delivered its payload.
				 */
				has_payload = false;
				payload = 0;
			}
		} else {
			vcpu->arch.exception.pending = true;
			vcpu->arch.exception.injected = false;
		}
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.vector = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.has_payload = has_payload;
		vcpu->arch.exception.payload = payload;
		if (!is_guest_mode(vcpu))
			kvm_deliver_exception_payload(vcpu,
						      &vcpu->arch.exception);
		return;
	}

	/* to check exception */
	prev_nr = vcpu->arch.exception.vector;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
	    (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/*
		 * Synthesize #DF.  Clear the previously injected or pending
		 * exception so as not to incorrectly trigger shutdown.
		 */
		vcpu->arch.exception.injected = false;
		vcpu->arch.exception.pending = false;

		kvm_queue_exception_e(vcpu, DF_VECTOR, 0);
	} else {
		/*
		 * Replace the previous exception with the new one in the hope
		 * that instruction re-execution will regenerate the lost
		 * exception.
		 */
		goto queue;
	}
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
			   unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_p);

static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
				    u32 error_code, unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, true, error_code,
			       true, payload, false);
}

int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		return kvm_skip_emulated_instruction(vcpu);

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
				       EMULTYPE_COMPLETE_USER_EXIT);
}

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;

	/*
	 * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of
	 * whether or not L1 wants to intercept "regular" #PF.
	 */
	if (is_guest_mode(vcpu) && fault->async_page_fault)
		kvm_queue_exception_vmexit(vcpu, PF_VECTOR,
					   true, fault->error_code,
					   true, fault->address);
	else
		kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
					fault->address);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
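
/*
 * Roughly speaking, fault->nested_page_fault selects which MMU context took
 * the fault below: arch.mmu handles the (possibly nested) final translation,
 * while arch.walk_mmu is used to walk the guest's own page tables.  Outside
 * of nested guests the two point at the same context.
 */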
void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
				    struct x86_exception *fault)
{
	struct kvm_mmu *fault_mmu;

	WARN_ON_ONCE(fault->vector != PF_VECTOR);

	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
					       vcpu->arch.walk_mmu;

	/*
	 * Invalidate the TLB entry for the faulting address, if it exists,
	 * else the access will fault indefinitely (and to emulate hardware).
	 */
	if ((fault->error_code & PFERR_PRESENT_MASK) &&
	    !(fault->error_code & PFERR_RSVD_MASK))
		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
				       fault_mmu->root.hpa);

	fault_mmu->inject_page_fault(vcpu, fault);
}
EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Check that CPL <= required_cpl; if so, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);

static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
}
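
/*
 * In a PAE PDPTE, bits 2:1 and 8:5 are architecturally reserved, in addition
 * to any physical-address bits beyond the guest's MAXPHYADDR;
 * pdptr_rsvd_bits() builds exactly that mask.  As a worked example, with a
 * 36-bit guest MAXPHYADDR the mask covers bits 63:36, 8:5 and 2:1.
 */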
/*
 * Load the pae pdptrs.  Return 1 if they are all valid, 0 otherwise.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	gpa_t real_gpa;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	/*
	 * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
	 * to an L1 GPA.
	 */
	real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
				     PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
	if (real_gpa == INVALID_GPA)
		return 0;

	/* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */
	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
				       cr3 & GENMASK(11, 5), sizeof(pdpte));
	if (ret < 0)
		return 0;

	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & PT_PRESENT_MASK) &&
		    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
			return 0;
		}
	}

	/*
	 * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
	 * Shadow page roots need to be reconstructed instead.
	 */
	if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
		kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
	vcpu->arch.pdptrs_from_userspace = false;

	return 1;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);

		/*
		 * Clearing CR0.PG is defined to flush the TLB from the guest's
		 * perspective.
		 */
		if (!(cr0 & X86_CR0_PG))
			kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
	}

	if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr0);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

#ifdef CONFIG_X86_64
	if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
	    (cr0 & X86_CR0_PG)) {
		int cs_db, cs_l;

		if (!is_pae(vcpu))
			return 1;
		static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
		if (cs_l)
			return 1;
	}
#endif
	if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
	    is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
	    !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
		return 1;

	if (!(cr0 & X86_CR0_PG) &&
	    (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
		return 1;

	static_call(kvm_x86_set_cr0)(vcpu, cr0);

	kvm_post_set_cr0(vcpu, old_cr0, cr0);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
	}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    vcpu->arch.pkru != vcpu->arch.host_pkru &&
	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
	     kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
		write_pkru(vcpu->arch.pkru);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
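
/*
 * Both xsave-state switch helpers run on the VM-Enter/VM-Exit fast path,
 * hence the raw xsetbv()/wrmsrl()/write_pkru() primitives and the
 * compare-before-write pattern: when the guest and host values already
 * agree, the comparatively expensive hardware writes are skipped.
 */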
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
	     kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
		vcpu->arch.pkru = rdpkru();
		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
			write_pkru(vcpu->arch.host_pkru);
	}
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, host_xss);
	}

}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);

#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
}
#endif

static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0 = xcr;
	u64 old_xcr0 = vcpu->arch.xcr0;
	u64 valid_bits;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	if (!(xcr0 & XFEATURE_MASK_FP))
		return 1;
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
		return 1;

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving.  However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
	 */
	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
	if (xcr0 & ~valid_bits)
		return 1;

	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
		return 1;

	if (xcr0 & XFEATURE_MASK_AVX512) {
		if (!(xcr0 & XFEATURE_MASK_YMM))
			return 1;
		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
			return 1;
	}

	if ((xcr0 & XFEATURE_MASK_XTILE) &&
	    ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE))
		return 1;

	vcpu->arch.xcr0 = xcr0;

	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
		kvm_update_cpuid_runtime(vcpu);
	return 0;
}

int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
	/* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);

bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & cr4_reserved_bits)
		return false;

	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);

static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	return __kvm_is_valid_cr4(vcpu, cr4) &&
	       static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
}
void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{
	if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	/*
	 * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB
	 * according to the SDM; however, stale prev_roots could be reused
	 * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we
	 * free them all.  This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST
	 * or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not
	 * flushed, so fall through.
	 */
	if (!tdp_enabled &&
	    (cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE))
		kvm_mmu_unload(vcpu);

	/*
	 * The TLB has to be flushed for all PCIDs if any of the following
	 * (architecturally required) changes happen:
	 * - CR4.PCIDE is changed from 1 to 0
	 * - CR4.PGE is toggled
	 *
	 * This is a superset of KVM_REQ_TLB_FLUSH_CURRENT.
	 */
	if (((cr4 ^ old_cr4) & X86_CR4_PGE) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);

	/*
	 * The TLB has to be flushed for the current PCID if any of the
	 * following (architecturally required) changes happen:
	 * - CR4.SMEP is changed from 0 to 1
	 * - CR4.PAE is toggled
	 */
	else if (((cr4 ^ old_cr4) & X86_CR4_PAE) ||
		 ((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP)))
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);

}
EXPORT_SYMBOL_GPL(kvm_post_set_cr4);

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);

	if (!kvm_is_valid_cr4(vcpu, cr4))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
		if ((cr4 ^ old_cr4) & X86_CR4_LA57)
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
		   && !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
			return 1;

		/* PCID cannot be enabled if CR3[11:0] != 000H or EFER.LMA = 0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	static_call(kvm_x86_set_cr4)(vcpu, cr4);

	kvm_post_set_cr4(vcpu, old_cr4, cr4);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	unsigned long roots_to_free = 0;
	int i;

	/*
	 * MOV CR3 and INVPCID are usually not intercepted when using TDP, but
	 * this is reachable when running EPT=1 and unrestricted_guest=0, and
	 * also via the emulator.  KVM's TDP page tables are not in the scope
	 * of the invalidation, but the guest's TLB entries need to be flushed
	 * as the CPU may have cached entries in its TLB for the target PCID.
	 */
	if (unlikely(tdp_enabled)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/*
	 * If neither the current CR3 nor any of the prev_roots use the given
	 * PCID, then nothing needs to be done here because a resync will
	 * happen anyway before switching to any other CR3.
	 */
	if (kvm_get_active_pcid(vcpu) == pcid) {
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/*
	 * If PCID is disabled, there is no need to free prev_roots even if the
	 * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
	 * with PCIDE=0.
	 */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);

	kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
}
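
/*
 * Layout of a MOV-to-CR3 source operand with CR4.PCIDE=1, as consumed below
 * (see the SDM's "Process-Context Identifiers" section for reference):
 *
 *	bits 11:0	PCID to switch to
 *	bits 51:12	physical address of the top-level paging structure
 *	bit  63		NOFLUSH: don't flush TLB entries tagged with the PCID
 *
 * kvm_set_cr3() strips bit 63 before performing any GPA legality checks.
 */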
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	bool skip_tlb_flush = false;
	unsigned long pcid = 0;
#ifdef CONFIG_X86_64
	bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);

	if (pcid_enabled) {
		skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
		cr3 &= ~X86_CR3_PCID_NOFLUSH;
		pcid = cr3 & X86_CR3_PCID_MASK;
	}
#endif

	/* PDPTRs are always reloaded for PAE paging. */
	if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
		goto handle_tlb_flush;

	/*
	 * Do not condition the GPA check on long mode, this helper is used to
	 * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
	 * the current vCPU mode is accurate.
	 */
	if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
		return 1;

	if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
		return 1;

	if (cr3 != kvm_read_cr3(vcpu))
		kvm_mmu_new_pgd(vcpu, cr3);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
	/* Do not call post_set_cr3, we do not get here for confidential guests. */

handle_tlb_flush:
	/*
	 * A load of CR3 that flushes the TLB flushes only the current PCID,
	 * even if PCID is disabled, in which case PCID=0 is flushed.  It's a
	 * moot point in the end because _disabling_ PCID will flush all PCIDs,
	 * and it's impossible to use a non-zero PCID when PCID is disabled,
	 * i.e. only PCID=0 can be relevant.
	 */
	if (!skip_tlb_flush)
		kvm_invalidate_pcid(vcpu, pcid);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (lapic_in_kernel(vcpu))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
	int i;

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
	}
}

void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	static_call(kvm_x86_set_dr7)(vcpu, dr7);
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
	if (dr7 & DR7_BP_EN_MASK)
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_update_dr7);

static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
	u64 fixed = DR6_FIXED_1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
		fixed |= DR6_RTM;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
		fixed |= DR6_BUS_LOCK;
	return fixed;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[array_index_nospec(dr, size)] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
	case 6:
		if (!kvm_dr6_valid(val))
			return 1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
		break;
	case 5:
	default: /* 7 */
		if (!kvm_dr7_valid(val))
			return 1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[array_index_nospec(dr, size)];
		break;
	case 4:
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_rcx_read(vcpu);
	u64 data;

	if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	kvm_rax_write(vcpu, (u32)data);
	kvm_rdx_write(vcpu, data >> 32);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
/*
 * List of MSR numbers that we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features)
 * extract the supported MSRs from the related const lists.
 * msrs_to_save is selected from msrs_to_save_all to reflect the
 * capabilities of the host CPU.  This capabilities test skips MSRs that are
 * kvm-specific.  Those are put in emulated_msrs_all; filtering of
 * emulated_msrs may depend on host virtualization features rather than host
 * CPU features.
 */

static const u32 msrs_to_save_all[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
	MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
	MSR_IA32_SPEC_CTRL,
	MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
	MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
	MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
	MSR_IA32_UMWAIT_CONTROL,

	MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
	MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
	MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,

	/* This part of MSRs should match KVM_INTEL_PMC_MAX_GENERIC. */
	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
	MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
	MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
	MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
	MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
	MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
	MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
	MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,

	MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
	MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
	/* This part of MSRs should match KVM_AMD_PMC_MAX_GENERIC. */
	MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
	MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
	MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
	MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,

	MSR_IA32_XFD, MSR_IA32_XFD_ERR,
};

static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
static unsigned num_msrs_to_save;

static const u32 emulated_msrs_all[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
	HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
	HV_X64_MSR_RESET,
	HV_X64_MSR_VP_INDEX,
	HV_X64_MSR_VP_RUNTIME,
	HV_X64_MSR_SCONTROL,
	HV_X64_MSR_STIMER0_CONFIG,
	HV_X64_MSR_VP_ASSIST_PAGE,
	HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
	HV_X64_MSR_TSC_EMULATION_STATUS,
	HV_X64_MSR_SYNDBG_OPTIONS,
	HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
	HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
	HV_X64_MSR_SYNDBG_PENDING_BUFFER,

	MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,

	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSC_DEADLINE,
	MSR_IA32_ARCH_CAPABILITIES,
	MSR_IA32_PERF_CAPABILITIES,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
	MSR_IA32_MCG_EXT_CTL,
	MSR_IA32_SMBASE,
	MSR_SMI_COUNT,
	MSR_PLATFORM_INFO,
	MSR_MISC_FEATURES_ENABLES,
	MSR_AMD64_VIRT_SPEC_CTRL,
	MSR_AMD64_TSC_RATIO,
	MSR_IA32_POWER_CTL,
	MSR_IA32_UCODE_REV,

	/*
	 * The following list leaves out MSRs whose values are determined
	 * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
	 * We always support the "true" VMX control MSRs, even if the host
	 * processor does not, so I am putting these registers here rather
	 * than in msrs_to_save_all.
	 */
	MSR_IA32_VMX_BASIC,
	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
	MSR_IA32_VMX_TRUE_EXIT_CTLS,
	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
	MSR_IA32_VMX_MISC,
	MSR_IA32_VMX_CR0_FIXED0,
	MSR_IA32_VMX_CR4_FIXED0,
	MSR_IA32_VMX_VMCS_ENUM,
	MSR_IA32_VMX_PROCBASED_CTLS2,
	MSR_IA32_VMX_EPT_VPID_CAP,
	MSR_IA32_VMX_VMFUNC,

	MSR_K7_HWCR,
	MSR_KVM_POLL_CONTROL,
};

static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
static unsigned num_emulated_msrs;
/*
 * List of MSR numbers that are used to expose MSR-based features, which a
 * (nested) hypervisor can use to validate requested CPU features.
 */
static const u32 msr_based_features_all[] = {
	MSR_IA32_VMX_BASIC,
	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
	MSR_IA32_VMX_PINBASED_CTLS,
	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
	MSR_IA32_VMX_PROCBASED_CTLS,
	MSR_IA32_VMX_TRUE_EXIT_CTLS,
	MSR_IA32_VMX_EXIT_CTLS,
	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
	MSR_IA32_VMX_ENTRY_CTLS,
	MSR_IA32_VMX_MISC,
	MSR_IA32_VMX_CR0_FIXED0,
	MSR_IA32_VMX_CR0_FIXED1,
	MSR_IA32_VMX_CR4_FIXED0,
	MSR_IA32_VMX_CR4_FIXED1,
	MSR_IA32_VMX_VMCS_ENUM,
	MSR_IA32_VMX_PROCBASED_CTLS2,
	MSR_IA32_VMX_EPT_VPID_CAP,
	MSR_IA32_VMX_VMFUNC,

	MSR_F10H_DECFG,
	MSR_IA32_UCODE_REV,
	MSR_IA32_ARCH_CAPABILITIES,
	MSR_IA32_PERF_CAPABILITIES,
};

static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
static unsigned int num_msr_based_features;

/*
 * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
 * does not yet virtualize.  These include:
 *   10 - MISC_PACKAGE_CTRLS
 *   11 - ENERGY_FILTERING_CTL
 *   12 - DOITM
 *   18 - FB_CLEAR_CTRL
 *   21 - XAPIC_DISABLE_STATUS
 *   23 - OVERCLOCKING_STATUS
 */

#define KVM_SUPPORTED_ARCH_CAP \
	(ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
	 ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
	 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
	 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO)
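
/*
 * Note the two-step composition below: host IA32_ARCH_CAPABILITIES bits
 * outside KVM_SUPPORTED_ARCH_CAP are masked away first, and only then are
 * the bits that KVM can honor regardless of host support (e.g.
 * PSCHANGE_MC_NO, SKIP_VMENTRY_L1DFLUSH and the various *_NO bug bits)
 * synthesized on top.
 */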
static u64 kvm_get_arch_capabilities(void)
{
	u64 data = 0;

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
		data &= KVM_SUPPORTED_ARCH_CAP;
	}

	/*
	 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
	 * the nested hypervisor runs with NX huge pages.  If it is not,
	 * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
	 * L1 guests, so it need not worry about its own (L2) guests.
	 */
	data |= ARCH_CAP_PSCHANGE_MC_NO;

	/*
	 * If we're doing cache flushes (either "always" or "cond")
	 * we will do one whenever the guest does a vmlaunch/vmresume.
	 * If an outer hypervisor is doing the cache flush for us
	 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
	 * capability to the guest too, and if EPT is disabled we're not
	 * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
	 * require a nested hypervisor to do a flush of its own.
	 */
	if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;

	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		data |= ARCH_CAP_RDCL_NO;
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		data |= ARCH_CAP_SSB_NO;
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		data |= ARCH_CAP_MDS_NO;

	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		/*
		 * If RTM=0 because the kernel has disabled TSX, the host might
		 * have TAA_NO or TSX_CTRL.  Clear TAA_NO (the guest sees RTM=0
		 * and therefore knows that there cannot be TAA) but keep
		 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
		 * and we want to allow migrating those guests to tsx=off hosts.
		 */
		data &= ~ARCH_CAP_TAA_NO;
	} else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		data |= ARCH_CAP_TAA_NO;
	} else {
		/*
		 * Nothing to do here; we emulate TSX_CTRL if present on the
		 * host so the guest can choose between disabling TSX or
		 * using VERW to clear CPU buffers.
		 */
	}

	return data;
}

static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
	switch (msr->index) {
	case MSR_IA32_ARCH_CAPABILITIES:
		msr->data = kvm_get_arch_capabilities();
		break;
	case MSR_IA32_PERF_CAPABILITIES:
		msr->data = kvm_caps.supported_perf_cap;
		break;
	case MSR_IA32_UCODE_REV:
		rdmsrl_safe(msr->index, &msr->data);
		break;
	default:
		return static_call(kvm_x86_get_msr_feature)(msr);
	}
	return 0;
}

static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct kvm_msr_entry msr;
	int r;

	msr.index = index;
	r = kvm_get_msr_feature(&msr);

	if (r == KVM_MSR_RET_INVALID) {
		/* Unconditionally clear the output for simplicity */
		*data = 0;
		if (kvm_msr_ignored_check(index, 0, false))
			r = 0;
	}

	if (r)
		return r;

	*data = msr.data;

	return 0;
}

static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
		return false;

	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
		return false;

	if (efer & (EFER_LME | EFER_LMA) &&
	    !guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return false;

	if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
		return false;

	return true;
}

bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	return __kvm_valid_efer(vcpu, efer);
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	u64 old_efer = vcpu->arch.efer;
	u64 efer = msr_info->data;
	int r;

	if (efer & efer_reserved_bits)
		return 1;

	if (!msr_info->host_initiated) {
		if (!__kvm_valid_efer(vcpu, efer))
			return 1;

		if (is_paging(vcpu) &&
		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	r = static_call(kvm_x86_set_efer)(vcpu, efer);
	if (r) {
		WARN_ON(r > 0);
		return r;
	}

	if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
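
/*
 * Userspace-defined MSR filters (KVM_X86_SET_MSR_FILTER) are evaluated
 * below as follows: each range covers indices [base, base + nmsrs), and a
 * matching range decides the access via its bitmap, where bit (index - base)
 * set means "allow".  For example (hypothetical numbers), a range with
 * base 0xc0000000 and nmsrs 0x100 governs 0xc0000000..0xc00000ff.  If no
 * range matches, the filter's default_allow policy applies.
 */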
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
{
	struct kvm_x86_msr_filter *msr_filter;
	struct msr_bitmap_range *ranges;
	struct kvm *kvm = vcpu->kvm;
	bool allowed;
	int idx;
	u32 i;

	/* x2APIC MSRs do not support filtering. */
	if (index >= 0x800 && index <= 0x8ff)
		return true;

	idx = srcu_read_lock(&kvm->srcu);

	msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
	if (!msr_filter) {
		allowed = true;
		goto out;
	}

	allowed = msr_filter->default_allow;
	ranges = msr_filter->ranges;

	for (i = 0; i < msr_filter->count; i++) {
		u32 start = ranges[i].base;
		u32 end = start + ranges[i].nmsrs;
		u32 flags = ranges[i].flags;
		unsigned long *bitmap = ranges[i].bitmap;

		if ((index >= start) && (index < end) && (flags & type)) {
			allowed = !!test_bit(index - start, bitmap);
			break;
		}
	}

out:
	srcu_read_unlock(&kvm->srcu, idx);

	return allowed;
}
EXPORT_SYMBOL_GPL(kvm_msr_allowed);

/*
 * Write @data into the MSR specified by @index.  Select MSR specific fault
 * checks are bypassed if @host_initiated is %true.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
			 bool host_initiated)
{
	struct msr_data msr;

	switch (index) {
	case MSR_FS_BASE:
	case MSR_GS_BASE:
	case MSR_KERNEL_GS_BASE:
	case MSR_CSTAR:
	case MSR_LSTAR:
		if (is_noncanonical_address(data, vcpu))
			return 1;
		break;
	case MSR_IA32_SYSENTER_EIP:
	case MSR_IA32_SYSENTER_ESP:
		/*
		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if a
		 * non-canonical address is written on Intel but not on
		 * AMD (which ignores the top 32-bits, because it does
		 * not implement 64-bit SYSENTER).
		 *
		 * 64-bit code should hence be able to write a non-canonical
		 * value on AMD.  Making the address canonical ensures that
		 * vmentry does not fail on Intel after writing a non-canonical
		 * value, and that something deterministic happens if the guest
		 * invokes 64-bit SYSENTER.
		 */
		data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
		break;
	case MSR_TSC_AUX:
		if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
			return 1;

		if (!host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
			return 1;

		/*
		 * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
		 * incomplete and conflicting architectural behavior.  Current
		 * AMD CPUs completely ignore bits 63:32, i.e. they aren't
		 * reserved and always read as zeros.  Enforce Intel's reserved
		 * bits check if and only if the guest CPU is Intel, and clear
		 * the bits in all other cases.  This ensures cross-vendor
		 * migration will provide consistent behavior for the guest.
		 */
		if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0)
			return 1;

		data = (u32)data;
		break;
	}

	msr.data = data;
	msr.index = index;
	msr.host_initiated = host_initiated;

	return static_call(kvm_x86_set_msr)(vcpu, &msr);
}

static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
				     u32 index, u64 data, bool host_initiated)
{
	int ret = __kvm_set_msr(vcpu, index, data, host_initiated);

	if (ret == KVM_MSR_RET_INVALID)
		if (kvm_msr_ignored_check(index, data, true))
			ret = 0;

	return ret;
}
Select MSR specific fault 1877 * checks are bypassed if @host_initiated is %true. 1878 * Returns 0 on success, non-0 otherwise. 1879 * Assumes vcpu_load() was already called. 1880 */ 1881 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, 1882 bool host_initiated) 1883 { 1884 struct msr_data msr; 1885 int ret; 1886 1887 switch (index) { 1888 case MSR_TSC_AUX: 1889 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) 1890 return 1; 1891 1892 if (!host_initiated && 1893 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && 1894 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) 1895 return 1; 1896 break; 1897 } 1898 1899 msr.index = index; 1900 msr.host_initiated = host_initiated; 1901 1902 ret = static_call(kvm_x86_get_msr)(vcpu, &msr); 1903 if (!ret) 1904 *data = msr.data; 1905 return ret; 1906 } 1907 1908 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, 1909 u32 index, u64 *data, bool host_initiated) 1910 { 1911 int ret = __kvm_get_msr(vcpu, index, data, host_initiated); 1912 1913 if (ret == KVM_MSR_RET_INVALID) { 1914 /* Unconditionally clear *data for simplicity */ 1915 *data = 0; 1916 if (kvm_msr_ignored_check(index, 0, false)) 1917 ret = 0; 1918 } 1919 1920 return ret; 1921 } 1922 1923 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1924 { 1925 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) 1926 return KVM_MSR_RET_FILTERED; 1927 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1928 } 1929 1930 static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data) 1931 { 1932 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) 1933 return KVM_MSR_RET_FILTERED; 1934 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1935 } 1936 1937 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1938 { 1939 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1940 } 1941 EXPORT_SYMBOL_GPL(kvm_get_msr); 1942 1943 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) 1944 { 1945 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1946 } 1947 EXPORT_SYMBOL_GPL(kvm_set_msr); 1948 1949 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu) 1950 { 1951 if (!vcpu->run->msr.error) { 1952 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); 1953 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); 1954 } 1955 } 1956 1957 static int complete_emulated_msr_access(struct kvm_vcpu *vcpu) 1958 { 1959 return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error); 1960 } 1961 1962 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) 1963 { 1964 complete_userspace_rdmsr(vcpu); 1965 return complete_emulated_msr_access(vcpu); 1966 } 1967 1968 static int complete_fast_msr_access(struct kvm_vcpu *vcpu) 1969 { 1970 return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); 1971 } 1972 1973 static int complete_fast_rdmsr(struct kvm_vcpu *vcpu) 1974 { 1975 complete_userspace_rdmsr(vcpu); 1976 return complete_fast_msr_access(vcpu); 1977 } 1978 1979 static u64 kvm_msr_reason(int r) 1980 { 1981 switch (r) { 1982 case KVM_MSR_RET_INVALID: 1983 return KVM_MSR_EXIT_REASON_UNKNOWN; 1984 case KVM_MSR_RET_FILTERED: 1985 return KVM_MSR_EXIT_REASON_FILTER; 1986 default: 1987 return KVM_MSR_EXIT_REASON_INVAL; 1988 } 1989 } 1990 1991 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, 1992 u32 exit_reason, u64 data, 1993 int (*completion)(struct kvm_vcpu *vcpu), 1994 int r) 1995 { 1996 u64 msr_reason = kvm_msr_reason(r); 1997 1998 /* Check if the user wanted to know about this MSR fault */ 1999 if 
(!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) 2000 return 0; 2001 2002 vcpu->run->exit_reason = exit_reason; 2003 vcpu->run->msr.error = 0; 2004 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); 2005 vcpu->run->msr.reason = msr_reason; 2006 vcpu->run->msr.index = index; 2007 vcpu->run->msr.data = data; 2008 vcpu->arch.complete_userspace_io = completion; 2009 2010 return 1; 2011 } 2012 2013 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) 2014 { 2015 u32 ecx = kvm_rcx_read(vcpu); 2016 u64 data; 2017 int r; 2018 2019 r = kvm_get_msr_with_filter(vcpu, ecx, &data); 2020 2021 if (!r) { 2022 trace_kvm_msr_read(ecx, data); 2023 2024 kvm_rax_write(vcpu, data & -1u); 2025 kvm_rdx_write(vcpu, (data >> 32) & -1u); 2026 } else { 2027 /* MSR read failed? See if we should ask user space */ 2028 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0, 2029 complete_fast_rdmsr, r)) 2030 return 0; 2031 trace_kvm_msr_read_ex(ecx); 2032 } 2033 2034 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 2035 } 2036 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); 2037 2038 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) 2039 { 2040 u32 ecx = kvm_rcx_read(vcpu); 2041 u64 data = kvm_read_edx_eax(vcpu); 2042 int r; 2043 2044 r = kvm_set_msr_with_filter(vcpu, ecx, data); 2045 2046 if (!r) { 2047 trace_kvm_msr_write(ecx, data); 2048 } else { 2049 /* MSR write failed? See if we should ask user space */ 2050 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data, 2051 complete_fast_msr_access, r)) 2052 return 0; 2053 /* Signal all other negative errors to userspace */ 2054 if (r < 0) 2055 return r; 2056 trace_kvm_msr_write_ex(ecx, data); 2057 } 2058 2059 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 2060 } 2061 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); 2062 2063 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) 2064 { 2065 return kvm_skip_emulated_instruction(vcpu); 2066 } 2067 EXPORT_SYMBOL_GPL(kvm_emulate_as_nop); 2068 2069 int kvm_emulate_invd(struct kvm_vcpu *vcpu) 2070 { 2071 /* Treat an INVD instruction as a NOP and just skip it. */ 2072 return kvm_emulate_as_nop(vcpu); 2073 } 2074 EXPORT_SYMBOL_GPL(kvm_emulate_invd); 2075 2076 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) 2077 { 2078 kvm_queue_exception(vcpu, UD_VECTOR); 2079 return 1; 2080 } 2081 EXPORT_SYMBOL_GPL(kvm_handle_invalid_op); 2082 2083 2084 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn) 2085 { 2086 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) && 2087 !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT)) 2088 return kvm_handle_invalid_op(vcpu); 2089 2090 pr_warn_once("kvm: %s instruction emulated as NOP!\n", insn); 2091 return kvm_emulate_as_nop(vcpu); 2092 } 2093 int kvm_emulate_mwait(struct kvm_vcpu *vcpu) 2094 { 2095 return kvm_emulate_monitor_mwait(vcpu, "MWAIT"); 2096 } 2097 EXPORT_SYMBOL_GPL(kvm_emulate_mwait); 2098 2099 int kvm_emulate_monitor(struct kvm_vcpu *vcpu) 2100 { 2101 return kvm_emulate_monitor_mwait(vcpu, "MONITOR"); 2102 } 2103 EXPORT_SYMBOL_GPL(kvm_emulate_monitor); 2104 2105 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) 2106 { 2107 xfer_to_guest_mode_prepare(); 2108 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || 2109 xfer_to_guest_mode_work_pending(); 2110 } 2111 2112 /* 2113 * The fast path for frequent and performance sensitive wrmsr emulation, 2114 * i.e. 
the sending of IPIs. Sending the IPI early in the VM-Exit flow reduces
2115 * the latency of virtual IPIs by avoiding the expensive bits of transitioning
2116 * from guest to host, e.g. reacquiring KVM's SRCU lock. The other cases
2117 * must only be handled after interrupts are enabled on the host.
2118 */
2119 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
2120 {
2121 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
2122 return 1;
2123
2124 if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
2125 ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
2126 ((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
2127 ((u32)(data >> 32) != X2APIC_BROADCAST))
2128 return kvm_x2apic_icr_write(vcpu->arch.apic, data);
2129
2130 return 1;
2131 }
2132
2133 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
2134 {
2135 if (!kvm_can_use_hv_timer(vcpu))
2136 return 1;
2137
2138 kvm_set_lapic_tscdeadline_msr(vcpu, data);
2139 return 0;
2140 }
2141
2142 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
2143 {
2144 u32 msr = kvm_rcx_read(vcpu);
2145 u64 data;
2146 fastpath_t ret = EXIT_FASTPATH_NONE;
2147
2148 switch (msr) {
2149 case APIC_BASE_MSR + (APIC_ICR >> 4):
2150 data = kvm_read_edx_eax(vcpu);
2151 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
2152 kvm_skip_emulated_instruction(vcpu);
2153 ret = EXIT_FASTPATH_EXIT_HANDLED;
2154 }
2155 break;
2156 case MSR_IA32_TSC_DEADLINE:
2157 data = kvm_read_edx_eax(vcpu);
2158 if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
2159 kvm_skip_emulated_instruction(vcpu);
2160 ret = EXIT_FASTPATH_REENTER_GUEST;
2161 }
2162 break;
2163 default:
2164 break;
2165 }
2166
2167 if (ret != EXIT_FASTPATH_NONE)
2168 trace_kvm_msr_write(msr, data);
2169
2170 return ret;
2171 }
2172 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
2173
2174 /*
2175 * Adapt set_msr() to msr_io()'s calling convention
2176 */
2177 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2178 {
2179 return kvm_get_msr_ignored_check(vcpu, index, data, true);
2180 }
2181
2182 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2183 {
2184 return kvm_set_msr_ignored_check(vcpu, index, *data, true);
2185 }
2186
2187 #ifdef CONFIG_X86_64
2188 struct pvclock_clock {
2189 int vclock_mode;
2190 u64 cycle_last;
2191 u64 mask;
2192 u32 mult;
2193 u32 shift;
2194 u64 base_cycles;
2195 u64 offset;
2196 };
2197
2198 struct pvclock_gtod_data {
2199 seqcount_t seq;
2200
2201 struct pvclock_clock clock; /* extract of a clocksource struct */
2202 struct pvclock_clock raw_clock; /* extract of a clocksource struct */
2203
2204 ktime_t offs_boot;
2205 u64 wall_time_sec;
2206 };
2207
2208 static struct pvclock_gtod_data pvclock_gtod_data;
2209
2210 static void update_pvclock_gtod(struct timekeeper *tk)
2211 {
2212 struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
2213
2214 write_seqcount_begin(&vdata->seq);
2215
2216 /* copy pvclock gtod data */
2217 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode;
2218 vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
2219 vdata->clock.mask = tk->tkr_mono.mask;
2220 vdata->clock.mult = tk->tkr_mono.mult;
2221 vdata->clock.shift = tk->tkr_mono.shift;
2222 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec;
2223 vdata->clock.offset = tk->tkr_mono.base;
2224
2225 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode;
2226 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last;
2227 vdata->raw_clock.mask =
tk->tkr_raw.mask; 2228 vdata->raw_clock.mult = tk->tkr_raw.mult; 2229 vdata->raw_clock.shift = tk->tkr_raw.shift; 2230 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; 2231 vdata->raw_clock.offset = tk->tkr_raw.base; 2232 2233 vdata->wall_time_sec = tk->xtime_sec; 2234 2235 vdata->offs_boot = tk->offs_boot; 2236 2237 write_seqcount_end(&vdata->seq); 2238 } 2239 2240 static s64 get_kvmclock_base_ns(void) 2241 { 2242 /* Count up from boot time, but with the frequency of the raw clock. */ 2243 return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot)); 2244 } 2245 #else 2246 static s64 get_kvmclock_base_ns(void) 2247 { 2248 /* Master clock not used, so we can just use CLOCK_BOOTTIME. */ 2249 return ktime_get_boottime_ns(); 2250 } 2251 #endif 2252 2253 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs) 2254 { 2255 int version; 2256 int r; 2257 struct pvclock_wall_clock wc; 2258 u32 wc_sec_hi; 2259 u64 wall_nsec; 2260 2261 if (!wall_clock) 2262 return; 2263 2264 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); 2265 if (r) 2266 return; 2267 2268 if (version & 1) 2269 ++version; /* first time write, random junk */ 2270 2271 ++version; 2272 2273 if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version))) 2274 return; 2275 2276 /* 2277 * The guest calculates current wall clock time by adding 2278 * system time (updated by kvm_guest_time_update below) to the 2279 * wall clock specified here. We do the reverse here. 2280 */ 2281 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); 2282 2283 wc.nsec = do_div(wall_nsec, 1000000000); 2284 wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */ 2285 wc.version = version; 2286 2287 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); 2288 2289 if (sec_hi_ofs) { 2290 wc_sec_hi = wall_nsec >> 32; 2291 kvm_write_guest(kvm, wall_clock + sec_hi_ofs, 2292 &wc_sec_hi, sizeof(wc_sec_hi)); 2293 } 2294 2295 version++; 2296 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); 2297 } 2298 2299 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, 2300 bool old_msr, bool host_initiated) 2301 { 2302 struct kvm_arch *ka = &vcpu->kvm->arch; 2303 2304 if (vcpu->vcpu_id == 0 && !host_initiated) { 2305 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) 2306 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2307 2308 ka->boot_vcpu_runs_old_kvmclock = old_msr; 2309 } 2310 2311 vcpu->arch.time = system_time; 2312 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 2313 2314 /* we verify if the enable bit is set... 
*/ 2315 if (system_time & 1) { 2316 kvm_gpc_activate(vcpu->kvm, &vcpu->arch.pv_time, vcpu, 2317 KVM_HOST_USES_PFN, system_time & ~1ULL, 2318 sizeof(struct pvclock_vcpu_time_info)); 2319 } else { 2320 kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time); 2321 } 2322 2323 return; 2324 } 2325 2326 static uint32_t div_frac(uint32_t dividend, uint32_t divisor) 2327 { 2328 do_shl32_div32(dividend, divisor); 2329 return dividend; 2330 } 2331 2332 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz, 2333 s8 *pshift, u32 *pmultiplier) 2334 { 2335 uint64_t scaled64; 2336 int32_t shift = 0; 2337 uint64_t tps64; 2338 uint32_t tps32; 2339 2340 tps64 = base_hz; 2341 scaled64 = scaled_hz; 2342 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { 2343 tps64 >>= 1; 2344 shift--; 2345 } 2346 2347 tps32 = (uint32_t)tps64; 2348 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { 2349 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) 2350 scaled64 >>= 1; 2351 else 2352 tps32 <<= 1; 2353 shift++; 2354 } 2355 2356 *pshift = shift; 2357 *pmultiplier = div_frac(scaled64, tps32); 2358 } 2359 2360 #ifdef CONFIG_X86_64 2361 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); 2362 #endif 2363 2364 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); 2365 static unsigned long max_tsc_khz; 2366 2367 static u32 adjust_tsc_khz(u32 khz, s32 ppm) 2368 { 2369 u64 v = (u64)khz * (1000000 + ppm); 2370 do_div(v, 1000000); 2371 return v; 2372 } 2373 2374 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier); 2375 2376 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) 2377 { 2378 u64 ratio; 2379 2380 /* Guest TSC same frequency as host TSC? */ 2381 if (!scale) { 2382 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); 2383 return 0; 2384 } 2385 2386 /* TSC scaling supported? */ 2387 if (!kvm_caps.has_tsc_control) { 2388 if (user_tsc_khz > tsc_khz) { 2389 vcpu->arch.tsc_catchup = 1; 2390 vcpu->arch.tsc_always_catchup = 1; 2391 return 0; 2392 } else { 2393 pr_warn_ratelimited("user requested TSC rate below hardware speed\n"); 2394 return -1; 2395 } 2396 } 2397 2398 /* TSC scaling required - calculate ratio */ 2399 ratio = mul_u64_u32_div(1ULL << kvm_caps.tsc_scaling_ratio_frac_bits, 2400 user_tsc_khz, tsc_khz); 2401 2402 if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) { 2403 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", 2404 user_tsc_khz); 2405 return -1; 2406 } 2407 2408 kvm_vcpu_write_tsc_multiplier(vcpu, ratio); 2409 return 0; 2410 } 2411 2412 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) 2413 { 2414 u32 thresh_lo, thresh_hi; 2415 int use_scaling = 0; 2416 2417 /* tsc_khz can be zero if TSC calibration fails */ 2418 if (user_tsc_khz == 0) { 2419 /* set tsc_scaling_ratio to a safe value */ 2420 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); 2421 return -1; 2422 } 2423 2424 /* Compute a scale to convert nanoseconds in TSC cycles */ 2425 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC, 2426 &vcpu->arch.virtual_tsc_shift, 2427 &vcpu->arch.virtual_tsc_mult); 2428 vcpu->arch.virtual_tsc_khz = user_tsc_khz; 2429 2430 /* 2431 * Compute the variation in TSC rate which is acceptable 2432 * within the range of tolerance and decide if the 2433 * rate being applied is within that bounds of the hardware 2434 * rate. If so, no scaling or compensation need be done. 
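 *
 * Worked example with illustrative numbers: on a 2 GHz host
 * (tsc_khz = 2,000,000) with the default tsc_tolerance_ppm of 250,
 * adjust_tsc_khz() gives thresh_lo = 1,999,500 and
 * thresh_hi = 2,000,500. A requested rate of 1,999,800 kHz is then
 * treated as matching the hardware rate, while e.g. 2,100,000 kHz
 * falls outside the window and sets use_scaling = 1.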
2435 */ 2436 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); 2437 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); 2438 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) { 2439 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi); 2440 use_scaling = 1; 2441 } 2442 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); 2443 } 2444 2445 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) 2446 { 2447 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, 2448 vcpu->arch.virtual_tsc_mult, 2449 vcpu->arch.virtual_tsc_shift); 2450 tsc += vcpu->arch.this_tsc_write; 2451 return tsc; 2452 } 2453 2454 #ifdef CONFIG_X86_64 2455 static inline int gtod_is_based_on_tsc(int mode) 2456 { 2457 return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK; 2458 } 2459 #endif 2460 2461 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) 2462 { 2463 #ifdef CONFIG_X86_64 2464 bool vcpus_matched; 2465 struct kvm_arch *ka = &vcpu->kvm->arch; 2466 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2467 2468 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 2469 atomic_read(&vcpu->kvm->online_vcpus)); 2470 2471 /* 2472 * Once the masterclock is enabled, always perform request in 2473 * order to update it. 2474 * 2475 * In order to enable masterclock, the host clocksource must be TSC 2476 * and the vcpus need to have matched TSCs. When that happens, 2477 * perform request to enable masterclock. 2478 */ 2479 if (ka->use_master_clock || 2480 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) 2481 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2482 2483 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, 2484 atomic_read(&vcpu->kvm->online_vcpus), 2485 ka->use_master_clock, gtod->clock.vclock_mode); 2486 #endif 2487 } 2488 2489 /* 2490 * Multiply tsc by a fixed point number represented by ratio. 2491 * 2492 * The most significant 64-N bits (mult) of ratio represent the 2493 * integral part of the fixed point number; the remaining N bits 2494 * (frac) represent the fractional part, ie. ratio represents a fixed 2495 * point number (mult + frac * 2^(-N)). 2496 * 2497 * N equals to kvm_caps.tsc_scaling_ratio_frac_bits. 
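 *
 * Illustrative example, assuming N = 48 fractional bits: to run a
 * 3 GHz guest TSC on a 2 GHz host, ratio = 1.5 * 2^48 =
 * 0x1800000000000, and __scale_tsc() computes
 * (tsc * ratio) >> 48 = 1.5 * tsc. (48 is only an assumption for the
 * example; the actual value is kvm_caps.tsc_scaling_ratio_frac_bits.)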
2498 */ 2499 static inline u64 __scale_tsc(u64 ratio, u64 tsc) 2500 { 2501 return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits); 2502 } 2503 2504 u64 kvm_scale_tsc(u64 tsc, u64 ratio) 2505 { 2506 u64 _tsc = tsc; 2507 2508 if (ratio != kvm_caps.default_tsc_scaling_ratio) 2509 _tsc = __scale_tsc(ratio, tsc); 2510 2511 return _tsc; 2512 } 2513 EXPORT_SYMBOL_GPL(kvm_scale_tsc); 2514 2515 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) 2516 { 2517 u64 tsc; 2518 2519 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); 2520 2521 return target_tsc - tsc; 2522 } 2523 2524 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) 2525 { 2526 return vcpu->arch.l1_tsc_offset + 2527 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio); 2528 } 2529 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 2530 2531 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier) 2532 { 2533 u64 nested_offset; 2534 2535 if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio) 2536 nested_offset = l1_offset; 2537 else 2538 nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier, 2539 kvm_caps.tsc_scaling_ratio_frac_bits); 2540 2541 nested_offset += l2_offset; 2542 return nested_offset; 2543 } 2544 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset); 2545 2546 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier) 2547 { 2548 if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio) 2549 return mul_u64_u64_shr(l1_multiplier, l2_multiplier, 2550 kvm_caps.tsc_scaling_ratio_frac_bits); 2551 2552 return l1_multiplier; 2553 } 2554 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier); 2555 2556 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset) 2557 { 2558 trace_kvm_write_tsc_offset(vcpu->vcpu_id, 2559 vcpu->arch.l1_tsc_offset, 2560 l1_offset); 2561 2562 vcpu->arch.l1_tsc_offset = l1_offset; 2563 2564 /* 2565 * If we are here because L1 chose not to trap WRMSR to TSC then 2566 * according to the spec this should set L1's TSC (as opposed to 2567 * setting L1's offset for L2). 2568 */ 2569 if (is_guest_mode(vcpu)) 2570 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( 2571 l1_offset, 2572 static_call(kvm_x86_get_l2_tsc_offset)(vcpu), 2573 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2574 else 2575 vcpu->arch.tsc_offset = l1_offset; 2576 2577 static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset); 2578 } 2579 2580 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier) 2581 { 2582 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; 2583 2584 /* Userspace is changing the multiplier while L2 is active */ 2585 if (is_guest_mode(vcpu)) 2586 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( 2587 l1_multiplier, 2588 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2589 else 2590 vcpu->arch.tsc_scaling_ratio = l1_multiplier; 2591 2592 if (kvm_caps.has_tsc_control) 2593 static_call(kvm_x86_write_tsc_multiplier)( 2594 vcpu, vcpu->arch.tsc_scaling_ratio); 2595 } 2596 2597 static inline bool kvm_check_tsc_unstable(void) 2598 { 2599 #ifdef CONFIG_X86_64 2600 /* 2601 * TSC is marked unstable when we're running on Hyper-V, 2602 * 'TSC page' clocksource is good. 2603 */ 2604 if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK) 2605 return false; 2606 #endif 2607 return check_tsc_unstable(); 2608 } 2609 2610 /* 2611 * Infers attempts to synchronize the guest's tsc from host writes. 
Sets the
2612 * offset for the vcpu and tracks the TSC matching generation that the vcpu
2613 * participates in.
2614 */
2615 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
2616 u64 ns, bool matched)
2617 {
2618 struct kvm *kvm = vcpu->kvm;
2619
2620 lockdep_assert_held(&kvm->arch.tsc_write_lock);
2621
2622 /*
2623 * We also track the most recent recorded KHZ, write and time to
2624 * allow the matching interval to be extended at each write.
2625 */
2626 kvm->arch.last_tsc_nsec = ns;
2627 kvm->arch.last_tsc_write = tsc;
2628 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
2629 kvm->arch.last_tsc_offset = offset;
2630
2631 vcpu->arch.last_guest_tsc = tsc;
2632
2633 kvm_vcpu_write_tsc_offset(vcpu, offset);
2634
2635 if (!matched) {
2636 /*
2637 * We split periods of matched TSC writes into generations.
2638 * For each generation, we track the original measured
2639 * nanosecond time, offset, and write, so if TSCs are in
2640 * sync, we can match exact offset, and if not, we can match
2641 * exact software computation in compute_guest_tsc()
2642 *
2643 * These values are tracked in kvm->arch.cur_xxx variables.
2644 */
2645 kvm->arch.cur_tsc_generation++;
2646 kvm->arch.cur_tsc_nsec = ns;
2647 kvm->arch.cur_tsc_write = tsc;
2648 kvm->arch.cur_tsc_offset = offset;
2649 kvm->arch.nr_vcpus_matched_tsc = 0;
2650 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
2651 kvm->arch.nr_vcpus_matched_tsc++;
2652 }
2653
2654 /* Keep track of which generation this VCPU has synchronized to */
2655 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
2656 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
2657 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
2658
2659 kvm_track_tsc_matching(vcpu);
2660 }
2661
2662 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
2663 {
2664 struct kvm *kvm = vcpu->kvm;
2665 u64 offset, ns, elapsed;
2666 unsigned long flags;
2667 bool matched = false;
2668 bool synchronizing = false;
2669
2670 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
2671 offset = kvm_compute_l1_tsc_offset(vcpu, data);
2672 ns = get_kvmclock_base_ns();
2673 elapsed = ns - kvm->arch.last_tsc_nsec;
2674
2675 if (vcpu->arch.virtual_tsc_khz) {
2676 if (data == 0) {
2677 /*
2678 * detection of vcpu initialization -- need to sync
2679 * with other vCPUs. This particularly helps to keep
2680 * kvm_clock stable after CPU hotplug
2681 */
2682 synchronizing = true;
2683 } else {
2684 u64 tsc_exp = kvm->arch.last_tsc_write +
2685 nsec_to_cycles(vcpu, elapsed);
2686 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
2687 /*
2688 * Special case: TSC write with a small delta (1 second)
2689 * of virtual cycle time against real time is
2690 * interpreted as an attempt to synchronize the CPU.
2691 */
2692 synchronizing = data < tsc_exp + tsc_hz &&
2693 data + tsc_hz > tsc_exp;
2694 }
2695 }
2696
2697 /*
2698 * For a reliable TSC, we can match TSC offsets, and for an unstable
2699 * TSC, we add elapsed time in this computation. We could let the
2700 * compensation code attempt to catch up if we fall behind, but
2701 * it's better to try to match offsets from the beginning.
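 *
 * Example of the one second window above: a vCPU with
 * virtual_tsc_khz = 1,000,000 (1 GHz) has tsc_hz = 10^9, so any
 * write landing within +/- 10^9 cycles of tsc_exp, i.e. within one
 * second of virtual TSC time of the expected value, is treated as a
 * synchronization attempt rather than as a new baseline.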
2702 */
2703 if (synchronizing &&
2704 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
2705 if (!kvm_check_tsc_unstable()) {
2706 offset = kvm->arch.cur_tsc_offset;
2707 } else {
2708 u64 delta = nsec_to_cycles(vcpu, elapsed);
2709 data += delta;
2710 offset = kvm_compute_l1_tsc_offset(vcpu, data);
2711 }
2712 matched = true;
2713 }
2714
2715 __kvm_synchronize_tsc(vcpu, offset, data, ns, matched);
2716 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
2717 }
2718
2719 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
2720 s64 adjustment)
2721 {
2722 u64 tsc_offset = vcpu->arch.l1_tsc_offset;
2723 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
2724 }
2725
2726 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
2727 {
2728 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio)
2729 WARN_ON(adjustment < 0);
2730 adjustment = kvm_scale_tsc((u64) adjustment,
2731 vcpu->arch.l1_tsc_scaling_ratio);
2732 adjust_tsc_offset_guest(vcpu, adjustment);
2733 }
2734
2735 #ifdef CONFIG_X86_64
2736
2737 static u64 read_tsc(void)
2738 {
2739 u64 ret = (u64)rdtsc_ordered();
2740 u64 last = pvclock_gtod_data.clock.cycle_last;
2741
2742 if (likely(ret >= last))
2743 return ret;
2744
2745 /*
2746 * GCC likes to generate cmov here, but this branch is extremely
2747 * predictable (it's just a function of time and the likely is
2748 * very likely) and there's a data dependence, so force GCC
2749 * to generate a branch instead. I don't barrier() because
2750 * we don't actually need a barrier, and if this function
2751 * ever gets inlined it will generate worse code.
2752 */
2753 asm volatile ("");
2754 return last;
2755 }
2756
2757 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
2758 int *mode)
2759 {
2760 long v;
2761 u64 tsc_pg_val;
2762
2763 switch (clock->vclock_mode) {
2764 case VDSO_CLOCKMODE_HVCLOCK:
2765 tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
2766 tsc_timestamp);
2767 if (tsc_pg_val != U64_MAX) {
2768 /* TSC page valid */
2769 *mode = VDSO_CLOCKMODE_HVCLOCK;
2770 v = (tsc_pg_val - clock->cycle_last) &
2771 clock->mask;
2772 } else {
2773 /* TSC page invalid */
2774 *mode = VDSO_CLOCKMODE_NONE;
2775 }
2776 break;
2777 case VDSO_CLOCKMODE_TSC:
2778 *mode = VDSO_CLOCKMODE_TSC;
2779 *tsc_timestamp = read_tsc();
2780 v = (*tsc_timestamp - clock->cycle_last) &
2781 clock->mask;
2782 break;
2783 default:
2784 *mode = VDSO_CLOCKMODE_NONE;
2785 }
2786
2787 if (*mode == VDSO_CLOCKMODE_NONE)
2788 *tsc_timestamp = v = 0;
2789
2790 return v * clock->mult;
2791 }
2792
2793 static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
2794 {
2795 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2796 unsigned long seq;
2797 int mode;
2798 u64 ns;
2799
2800 do {
2801 seq = read_seqcount_begin(&gtod->seq);
2802 ns = gtod->raw_clock.base_cycles;
2803 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
2804 ns >>= gtod->raw_clock.shift;
2805 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot));
2806 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2807 *t = ns;
2808
2809 return mode;
2810 }
2811
2812 static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
2813 {
2814 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2815 unsigned long seq;
2816 int mode;
2817 u64 ns;
2818
2819 do {
2820 seq = read_seqcount_begin(&gtod->seq);
2821 ts->tv_sec = gtod->wall_time_sec;
2822 ns = gtod->clock.base_cycles;
2823 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
2824 ns
>>= gtod->clock.shift;
2825 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2826
2827 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
2828 ts->tv_nsec = ns;
2829
2830 return mode;
2831 }
2832
2833 /* returns true if host is using TSC based clocksource */
2834 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
2835 {
2836 /* checked again under seqlock below */
2837 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2838 return false;
2839
2840 return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns,
2841 tsc_timestamp));
2842 }
2843
2844 /* returns true if host is using TSC based clocksource */
2845 static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
2846 u64 *tsc_timestamp)
2847 {
2848 /* checked again under seqlock below */
2849 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2850 return false;
2851
2852 return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
2853 }
2854 #endif
2855
2856 /*
2857 *
2858 * Assuming a stable TSC across physical CPUs, and a stable TSC
2859 * across virtual CPUs, the following condition is possible.
2860 * Each numbered line represents an event visible to both
2861 * CPUs at the next numbered event.
2862 *
2863 * "timespecX" represents host monotonic time. "tscX" represents
2864 * RDTSC value.
2865 *
2866 * VCPU0 on CPU0 | VCPU1 on CPU1
2867 *
2868 * 1. read timespec0,tsc0
2869 * 2. | timespec1 = timespec0 + N
2870 * | tsc1 = tsc0 + M
2871 * 3. transition to guest | transition to guest
2872 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
2873 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
2874 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2875 *
2876 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
2877 *
2878 * - ret0 < ret1
2879 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2880 * ...
2881 * - 0 < N - M => M < N
2882 *
2883 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
2884 * always the case (the difference between two distinct xtime instances
2885 * might be smaller than the difference between corresponding TSC reads,
2886 * when updating guest vcpus pvclock areas).
2887 *
2888 * To avoid that problem, do not allow visibility of distinct
2889 * system_timestamp/tsc_timestamp values simultaneously: use a master
2890 * copy of host monotonic time values. Update that master copy
2891 * in lockstep.
2892 *
2893 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
2894 *
2895 */
2896
2897 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
2898 {
2899 #ifdef CONFIG_X86_64
2900 struct kvm_arch *ka = &kvm->arch;
2901 int vclock_mode;
2902 bool host_tsc_clocksource, vcpus_matched;
2903
2904 lockdep_assert_held(&kvm->arch.tsc_write_lock);
2905 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2906 atomic_read(&kvm->online_vcpus));
2907
2908 /*
2909 * If the host uses TSC clock, then passthrough TSC as stable
2910 * to the guest.
2911 */ 2912 host_tsc_clocksource = kvm_get_time_and_clockread( 2913 &ka->master_kernel_ns, 2914 &ka->master_cycle_now); 2915 2916 ka->use_master_clock = host_tsc_clocksource && vcpus_matched 2917 && !ka->backwards_tsc_observed 2918 && !ka->boot_vcpu_runs_old_kvmclock; 2919 2920 if (ka->use_master_clock) 2921 atomic_set(&kvm_guest_has_master_clock, 1); 2922 2923 vclock_mode = pvclock_gtod_data.clock.vclock_mode; 2924 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, 2925 vcpus_matched); 2926 #endif 2927 } 2928 2929 static void kvm_make_mclock_inprogress_request(struct kvm *kvm) 2930 { 2931 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); 2932 } 2933 2934 static void __kvm_start_pvclock_update(struct kvm *kvm) 2935 { 2936 raw_spin_lock_irq(&kvm->arch.tsc_write_lock); 2937 write_seqcount_begin(&kvm->arch.pvclock_sc); 2938 } 2939 2940 static void kvm_start_pvclock_update(struct kvm *kvm) 2941 { 2942 kvm_make_mclock_inprogress_request(kvm); 2943 2944 /* no guest entries from this point */ 2945 __kvm_start_pvclock_update(kvm); 2946 } 2947 2948 static void kvm_end_pvclock_update(struct kvm *kvm) 2949 { 2950 struct kvm_arch *ka = &kvm->arch; 2951 struct kvm_vcpu *vcpu; 2952 unsigned long i; 2953 2954 write_seqcount_end(&ka->pvclock_sc); 2955 raw_spin_unlock_irq(&ka->tsc_write_lock); 2956 kvm_for_each_vcpu(i, vcpu, kvm) 2957 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2958 2959 /* guest entries allowed */ 2960 kvm_for_each_vcpu(i, vcpu, kvm) 2961 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); 2962 } 2963 2964 static void kvm_update_masterclock(struct kvm *kvm) 2965 { 2966 kvm_hv_request_tsc_page_update(kvm); 2967 kvm_start_pvclock_update(kvm); 2968 pvclock_update_vm_gtod_copy(kvm); 2969 kvm_end_pvclock_update(kvm); 2970 } 2971 2972 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. 
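 * Callers must tolerate a torn snapshot and retry, as get_kvmclock()
 * below does:
 *
 *   do {
 *       seq = read_seqcount_begin(&ka->pvclock_sc);
 *       __get_kvmclock(kvm, data);
 *   } while (read_seqcount_retry(&ka->pvclock_sc, seq));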
*/ 2973 static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 2974 { 2975 struct kvm_arch *ka = &kvm->arch; 2976 struct pvclock_vcpu_time_info hv_clock; 2977 2978 /* both __this_cpu_read() and rdtsc() should be on the same cpu */ 2979 get_cpu(); 2980 2981 data->flags = 0; 2982 if (ka->use_master_clock && __this_cpu_read(cpu_tsc_khz)) { 2983 #ifdef CONFIG_X86_64 2984 struct timespec64 ts; 2985 2986 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) { 2987 data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec; 2988 data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC; 2989 } else 2990 #endif 2991 data->host_tsc = rdtsc(); 2992 2993 data->flags |= KVM_CLOCK_TSC_STABLE; 2994 hv_clock.tsc_timestamp = ka->master_cycle_now; 2995 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 2996 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, 2997 &hv_clock.tsc_shift, 2998 &hv_clock.tsc_to_system_mul); 2999 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc); 3000 } else { 3001 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; 3002 } 3003 3004 put_cpu(); 3005 } 3006 3007 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 3008 { 3009 struct kvm_arch *ka = &kvm->arch; 3010 unsigned seq; 3011 3012 do { 3013 seq = read_seqcount_begin(&ka->pvclock_sc); 3014 __get_kvmclock(kvm, data); 3015 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 3016 } 3017 3018 u64 get_kvmclock_ns(struct kvm *kvm) 3019 { 3020 struct kvm_clock_data data; 3021 3022 get_kvmclock(kvm, &data); 3023 return data.clock; 3024 } 3025 3026 static void kvm_setup_guest_pvclock(struct kvm_vcpu *v, 3027 struct gfn_to_pfn_cache *gpc, 3028 unsigned int offset) 3029 { 3030 struct kvm_vcpu_arch *vcpu = &v->arch; 3031 struct pvclock_vcpu_time_info *guest_hv_clock; 3032 unsigned long flags; 3033 3034 read_lock_irqsave(&gpc->lock, flags); 3035 while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, 3036 offset + sizeof(*guest_hv_clock))) { 3037 read_unlock_irqrestore(&gpc->lock, flags); 3038 3039 if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, 3040 offset + sizeof(*guest_hv_clock))) 3041 return; 3042 3043 read_lock_irqsave(&gpc->lock, flags); 3044 } 3045 3046 guest_hv_clock = (void *)(gpc->khva + offset); 3047 3048 /* 3049 * This VCPU is paused, but it's legal for a guest to read another 3050 * VCPU's kvmclock, so we really have to follow the specification where 3051 * it says that version is odd if data is being modified, and even after 3052 * it is consistent. 
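 *
 * A conforming guest therefore reads the clock with a retry loop
 * along these lines (illustrative sketch only):
 *
 *   do {
 *       version = hv_clock->version;
 *       rmb();
 *       copy = *hv_clock;
 *       rmb();
 *   } while ((version & 1) || version != hv_clock->version);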
3053 */ 3054 3055 guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1; 3056 smp_wmb(); 3057 3058 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 3059 vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); 3060 3061 if (vcpu->pvclock_set_guest_stopped_request) { 3062 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; 3063 vcpu->pvclock_set_guest_stopped_request = false; 3064 } 3065 3066 memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock)); 3067 smp_wmb(); 3068 3069 guest_hv_clock->version = ++vcpu->hv_clock.version; 3070 3071 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); 3072 read_unlock_irqrestore(&gpc->lock, flags); 3073 3074 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); 3075 } 3076 3077 static int kvm_guest_time_update(struct kvm_vcpu *v) 3078 { 3079 unsigned long flags, tgt_tsc_khz; 3080 unsigned seq; 3081 struct kvm_vcpu_arch *vcpu = &v->arch; 3082 struct kvm_arch *ka = &v->kvm->arch; 3083 s64 kernel_ns; 3084 u64 tsc_timestamp, host_tsc; 3085 u8 pvclock_flags; 3086 bool use_master_clock; 3087 3088 kernel_ns = 0; 3089 host_tsc = 0; 3090 3091 /* 3092 * If the host uses TSC clock, then passthrough TSC as stable 3093 * to the guest. 3094 */ 3095 do { 3096 seq = read_seqcount_begin(&ka->pvclock_sc); 3097 use_master_clock = ka->use_master_clock; 3098 if (use_master_clock) { 3099 host_tsc = ka->master_cycle_now; 3100 kernel_ns = ka->master_kernel_ns; 3101 } 3102 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 3103 3104 /* Keep irq disabled to prevent changes to the clock */ 3105 local_irq_save(flags); 3106 tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz); 3107 if (unlikely(tgt_tsc_khz == 0)) { 3108 local_irq_restore(flags); 3109 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3110 return 1; 3111 } 3112 if (!use_master_clock) { 3113 host_tsc = rdtsc(); 3114 kernel_ns = get_kvmclock_base_ns(); 3115 } 3116 3117 tsc_timestamp = kvm_read_l1_tsc(v, host_tsc); 3118 3119 /* 3120 * We may have to catch up the TSC to match elapsed wall clock 3121 * time for two reasons, even if kvmclock is used. 3122 * 1) CPU could have been running below the maximum TSC rate 3123 * 2) Broken TSC compensation resets the base at each VCPU 3124 * entry to avoid unknown leaps of TSC even when running 3125 * again on the same CPU. This may cause apparent elapsed 3126 * time to disappear, and the guest to stand still or run 3127 * very slowly. 
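 *
 * Catch-up example with illustrative numbers: a guest with
 * virtual_tsc_khz = 2,000,000 expects its TSC to advance by
 * 2,000,000 cycles per elapsed millisecond. If compute_guest_tsc()
 * below returns a value ahead of the (scaled) host-derived
 * tsc_timestamp, the difference is folded into the TSC offset so
 * guest time does not appear to stand still.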
3128 */ 3129 if (vcpu->tsc_catchup) { 3130 u64 tsc = compute_guest_tsc(v, kernel_ns); 3131 if (tsc > tsc_timestamp) { 3132 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); 3133 tsc_timestamp = tsc; 3134 } 3135 } 3136 3137 local_irq_restore(flags); 3138 3139 /* With all the info we got, fill in the values */ 3140 3141 if (kvm_caps.has_tsc_control) 3142 tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz, 3143 v->arch.l1_tsc_scaling_ratio); 3144 3145 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { 3146 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, 3147 &vcpu->hv_clock.tsc_shift, 3148 &vcpu->hv_clock.tsc_to_system_mul); 3149 vcpu->hw_tsc_khz = tgt_tsc_khz; 3150 } 3151 3152 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; 3153 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; 3154 vcpu->last_guest_tsc = tsc_timestamp; 3155 3156 /* If the host uses TSC clocksource, then it is stable */ 3157 pvclock_flags = 0; 3158 if (use_master_clock) 3159 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; 3160 3161 vcpu->hv_clock.flags = pvclock_flags; 3162 3163 if (vcpu->pv_time.active) 3164 kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0); 3165 if (vcpu->xen.vcpu_info_cache.active) 3166 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache, 3167 offsetof(struct compat_vcpu_info, time)); 3168 if (vcpu->xen.vcpu_time_info_cache.active) 3169 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0); 3170 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); 3171 return 0; 3172 } 3173 3174 /* 3175 * kvmclock updates which are isolated to a given vcpu, such as 3176 * vcpu->cpu migration, should not allow system_timestamp from 3177 * the rest of the vcpus to remain static. Otherwise ntp frequency 3178 * correction applies to one vcpu's system_timestamp but not 3179 * the others. 3180 * 3181 * So in those cases, request a kvmclock update for all vcpus. 3182 * We need to rate-limit these requests though, as they can 3183 * considerably slow guests that have a large number of vcpus. 3184 * The time for a remote vcpu to update its kvmclock is bound 3185 * by the delay we use to rate-limit the updates. 3186 */ 3187 3188 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) 3189 3190 static void kvmclock_update_fn(struct work_struct *work) 3191 { 3192 unsigned long i; 3193 struct delayed_work *dwork = to_delayed_work(work); 3194 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3195 kvmclock_update_work); 3196 struct kvm *kvm = container_of(ka, struct kvm, arch); 3197 struct kvm_vcpu *vcpu; 3198 3199 kvm_for_each_vcpu(i, vcpu, kvm) { 3200 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3201 kvm_vcpu_kick(vcpu); 3202 } 3203 } 3204 3205 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) 3206 { 3207 struct kvm *kvm = v->kvm; 3208 3209 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3210 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 3211 KVMCLOCK_UPDATE_DELAY); 3212 } 3213 3214 #define KVMCLOCK_SYNC_PERIOD (300 * HZ) 3215 3216 static void kvmclock_sync_fn(struct work_struct *work) 3217 { 3218 struct delayed_work *dwork = to_delayed_work(work); 3219 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3220 kvmclock_sync_work); 3221 struct kvm *kvm = container_of(ka, struct kvm, arch); 3222 3223 if (!kvmclock_periodic_sync) 3224 return; 3225 3226 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); 3227 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 3228 KVMCLOCK_SYNC_PERIOD); 3229 } 3230 3231 /* These helpers are safe iff @msr is known to be an MCx bank MSR. 
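 * Each bank is a group of four consecutive MSRs starting at
 * MSR_IA32_MC0_CTL (0x400): CTL, STATUS, ADDR and MISC for bank i
 * live at 0x400 + 4*i through 0x403 + 4*i, which is why (msr & 3)
 * below identifies the register within a bank.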
*/ 3232 static bool is_mci_control_msr(u32 msr) 3233 { 3234 return (msr & 3) == 0; 3235 } 3236 static bool is_mci_status_msr(u32 msr) 3237 { 3238 return (msr & 3) == 1; 3239 } 3240 3241 /* 3242 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP. 3243 */ 3244 static bool can_set_mci_status(struct kvm_vcpu *vcpu) 3245 { 3246 /* McStatusWrEn enabled? */ 3247 if (guest_cpuid_is_amd_or_hygon(vcpu)) 3248 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); 3249 3250 return false; 3251 } 3252 3253 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3254 { 3255 u64 mcg_cap = vcpu->arch.mcg_cap; 3256 unsigned bank_num = mcg_cap & 0xff; 3257 u32 msr = msr_info->index; 3258 u64 data = msr_info->data; 3259 u32 offset, last_msr; 3260 3261 switch (msr) { 3262 case MSR_IA32_MCG_STATUS: 3263 vcpu->arch.mcg_status = data; 3264 break; 3265 case MSR_IA32_MCG_CTL: 3266 if (!(mcg_cap & MCG_CTL_P) && 3267 (data || !msr_info->host_initiated)) 3268 return 1; 3269 if (data != 0 && data != ~(u64)0) 3270 return 1; 3271 vcpu->arch.mcg_ctl = data; 3272 break; 3273 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3274 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; 3275 if (msr > last_msr) 3276 return 1; 3277 3278 if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated)) 3279 return 1; 3280 /* An attempt to write a 1 to a reserved bit raises #GP */ 3281 if (data & ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK)) 3282 return 1; 3283 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, 3284 last_msr + 1 - MSR_IA32_MC0_CTL2); 3285 vcpu->arch.mci_ctl2_banks[offset] = data; 3286 break; 3287 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3288 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; 3289 if (msr > last_msr) 3290 return 1; 3291 3292 /* 3293 * Only 0 or all 1s can be written to IA32_MCi_CTL, all other 3294 * values are architecturally undefined. But, some Linux 3295 * kernels clear bit 10 in bank 4 to workaround a BIOS/GART TLB 3296 * issue on AMD K8s, allow bit 10 to be clear when setting all 3297 * other bits in order to avoid an uncaught #GP in the guest. 3298 * 3299 * UNIXWARE clears bit 0 of MC1_CTL to ignore correctable, 3300 * single-bit ECC data errors. 3301 */ 3302 if (is_mci_control_msr(msr) && 3303 data != 0 && (data | (1 << 10) | 1) != ~(u64)0) 3304 return 1; 3305 3306 /* 3307 * All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR. 3308 * AMD-based CPUs allow non-zero values, but if and only if 3309 * HWCR[McStatusWrEn] is set. 
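 * (McStatusWrEn is bit 18 of MSR_K7_HWCR, tested by
 * can_set_mci_status() above; setting it is how a host typically
 * injects synthetic machine-check state into a guest.)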
3310 */
3311 if (!msr_info->host_initiated && is_mci_status_msr(msr) &&
3312 data != 0 && !can_set_mci_status(vcpu))
3313 return 1;
3314
3315 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
3316 last_msr + 1 - MSR_IA32_MC0_CTL);
3317 vcpu->arch.mce_banks[offset] = data;
3318 break;
3319 default:
3320 return 1;
3321 }
3322 return 0;
3323 }
3324
3325 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
3326 {
3327 u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
3328
3329 return (vcpu->arch.apf.msr_en_val & mask) == mask;
3330 }
3331
3332 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
3333 {
3334 gpa_t gpa = data & ~0x3f;
3335
3336 /* Bits 4:5 are reserved, should be zero */
3337 if (data & 0x30)
3338 return 1;
3339
3340 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) &&
3341 (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT))
3342 return 1;
3343
3344 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) &&
3345 (data & KVM_ASYNC_PF_DELIVERY_AS_INT))
3346 return 1;
3347
3348 if (!lapic_in_kernel(vcpu))
3349 return data ? 1 : 0;
3350
3351 vcpu->arch.apf.msr_en_val = data;
3352
3353 if (!kvm_pv_async_pf_enabled(vcpu)) {
3354 kvm_clear_async_pf_completion_queue(vcpu);
3355 kvm_async_pf_hash_reset(vcpu);
3356 return 0;
3357 }
3358
3359 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
3360 sizeof(u64)))
3361 return 1;
3362
3363 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
3364 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
3365
3366 kvm_async_pf_wakeup_all(vcpu);
3367
3368 return 0;
3369 }
3370
3371 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
3372 {
3373 /* Bits 8-63 are reserved */
3374 if (data >> 8)
3375 return 1;
3376
3377 if (!lapic_in_kernel(vcpu))
3378 return 1;
3379
3380 vcpu->arch.apf.msr_int_val = data;
3381
3382 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK;
3383
3384 return 0;
3385 }
3386
3387 static void kvmclock_reset(struct kvm_vcpu *vcpu)
3388 {
3389 kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
3390 vcpu->arch.time = 0;
3391 }
3392
3393 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
3394 {
3395 ++vcpu->stat.tlb_flush;
3396 static_call(kvm_x86_flush_tlb_all)(vcpu);
3397 }
3398
3399 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
3400 {
3401 ++vcpu->stat.tlb_flush;
3402
3403 if (!tdp_enabled) {
3404 /*
3405 * A TLB flush on behalf of the guest is equivalent to
3406 * INVPCID(all), toggling CR4.PGE, etc., which requires
3407 * a forced sync of the shadow page tables. Ensure all the
3408 * roots are synced and the guest TLB in hardware is clean.
3409 */
3410 kvm_mmu_sync_roots(vcpu);
3411 kvm_mmu_sync_prev_roots(vcpu);
3412 }
3413
3414 static_call(kvm_x86_flush_tlb_guest)(vcpu);
3415 }
3416
3417
3418 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
3419 {
3420 ++vcpu->stat.tlb_flush;
3421 static_call(kvm_x86_flush_tlb_current)(vcpu);
3422 }
3423
3424 /*
3425 * Service "local" TLB flush requests, which are specific to the current MMU
3426 * context. In addition to the generic event handling in vcpu_enter_guest(),
3427 * TLB flushes that are targeted at an MMU context also need to be serviced
3428 * prior to nested VM-Enter/VM-Exit.
3429 */ 3430 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu) 3431 { 3432 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 3433 kvm_vcpu_flush_tlb_current(vcpu); 3434 3435 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) 3436 kvm_vcpu_flush_tlb_guest(vcpu); 3437 } 3438 EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests); 3439 3440 static void record_steal_time(struct kvm_vcpu *vcpu) 3441 { 3442 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 3443 struct kvm_steal_time __user *st; 3444 struct kvm_memslots *slots; 3445 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; 3446 u64 steal; 3447 u32 version; 3448 3449 if (kvm_xen_msr_enabled(vcpu->kvm)) { 3450 kvm_xen_runstate_set_running(vcpu); 3451 return; 3452 } 3453 3454 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 3455 return; 3456 3457 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) 3458 return; 3459 3460 slots = kvm_memslots(vcpu->kvm); 3461 3462 if (unlikely(slots->generation != ghc->generation || 3463 gpa != ghc->gpa || 3464 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) { 3465 /* We rely on the fact that it fits in a single page. */ 3466 BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS); 3467 3468 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) || 3469 kvm_is_error_hva(ghc->hva) || !ghc->memslot) 3470 return; 3471 } 3472 3473 st = (struct kvm_steal_time __user *)ghc->hva; 3474 /* 3475 * Doing a TLB flush here, on the guest's behalf, can avoid 3476 * expensive IPIs. 3477 */ 3478 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { 3479 u8 st_preempted = 0; 3480 int err = -EFAULT; 3481 3482 if (!user_access_begin(st, sizeof(*st))) 3483 return; 3484 3485 asm volatile("1: xchgb %0, %2\n" 3486 "xor %1, %1\n" 3487 "2:\n" 3488 _ASM_EXTABLE_UA(1b, 2b) 3489 : "+q" (st_preempted), 3490 "+&r" (err), 3491 "+m" (st->preempted)); 3492 if (err) 3493 goto out; 3494 3495 user_access_end(); 3496 3497 vcpu->arch.st.preempted = 0; 3498 3499 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, 3500 st_preempted & KVM_VCPU_FLUSH_TLB); 3501 if (st_preempted & KVM_VCPU_FLUSH_TLB) 3502 kvm_vcpu_flush_tlb_guest(vcpu); 3503 3504 if (!user_access_begin(st, sizeof(*st))) 3505 goto dirty; 3506 } else { 3507 if (!user_access_begin(st, sizeof(*st))) 3508 return; 3509 3510 unsafe_put_user(0, &st->preempted, out); 3511 vcpu->arch.st.preempted = 0; 3512 } 3513 3514 unsafe_get_user(version, &st->version, out); 3515 if (version & 1) 3516 version += 1; /* first time write, random junk */ 3517 3518 version += 1; 3519 unsafe_put_user(version, &st->version, out); 3520 3521 smp_wmb(); 3522 3523 unsafe_get_user(steal, &st->steal, out); 3524 steal += current->sched_info.run_delay - 3525 vcpu->arch.st.last_steal; 3526 vcpu->arch.st.last_steal = current->sched_info.run_delay; 3527 unsafe_put_user(steal, &st->steal, out); 3528 3529 version += 1; 3530 unsafe_put_user(version, &st->version, out); 3531 3532 out: 3533 user_access_end(); 3534 dirty: 3535 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 3536 } 3537 3538 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3539 { 3540 bool pr = false; 3541 u32 msr = msr_info->index; 3542 u64 data = msr_info->data; 3543 3544 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) 3545 return kvm_xen_write_hypercall_page(vcpu, data); 3546 3547 switch (msr) { 3548 case MSR_AMD64_NB_CFG: 3549 case MSR_IA32_UCODE_WRITE: 3550 case MSR_VM_HSAVE_PA: 3551 case MSR_AMD64_PATCH_LOADER: 3552 case MSR_AMD64_BU_CFG2: 3553 case MSR_AMD64_DC_CFG: 3554 case 
MSR_F15H_EX_CFG: 3555 break; 3556 3557 case MSR_IA32_UCODE_REV: 3558 if (msr_info->host_initiated) 3559 vcpu->arch.microcode_version = data; 3560 break; 3561 case MSR_IA32_ARCH_CAPABILITIES: 3562 if (!msr_info->host_initiated) 3563 return 1; 3564 vcpu->arch.arch_capabilities = data; 3565 break; 3566 case MSR_IA32_PERF_CAPABILITIES: { 3567 struct kvm_msr_entry msr_ent = {.index = msr, .data = 0}; 3568 3569 if (!msr_info->host_initiated) 3570 return 1; 3571 if (kvm_get_msr_feature(&msr_ent)) 3572 return 1; 3573 if (data & ~msr_ent.data) 3574 return 1; 3575 3576 vcpu->arch.perf_capabilities = data; 3577 kvm_pmu_refresh(vcpu); 3578 return 0; 3579 } 3580 case MSR_EFER: 3581 return set_efer(vcpu, msr_info); 3582 case MSR_K7_HWCR: 3583 data &= ~(u64)0x40; /* ignore flush filter disable */ 3584 data &= ~(u64)0x100; /* ignore ignne emulation enable */ 3585 data &= ~(u64)0x8; /* ignore TLB cache disable */ 3586 3587 /* Handle McStatusWrEn */ 3588 if (data == BIT_ULL(18)) { 3589 vcpu->arch.msr_hwcr = data; 3590 } else if (data != 0) { 3591 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", 3592 data); 3593 return 1; 3594 } 3595 break; 3596 case MSR_FAM10H_MMIO_CONF_BASE: 3597 if (data != 0) { 3598 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " 3599 "0x%llx\n", data); 3600 return 1; 3601 } 3602 break; 3603 case 0x200 ... MSR_IA32_MC0_CTL2 - 1: 3604 case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff: 3605 return kvm_mtrr_set_msr(vcpu, msr, data); 3606 case MSR_IA32_APICBASE: 3607 return kvm_set_apic_base(vcpu, msr_info); 3608 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: 3609 return kvm_x2apic_msr_write(vcpu, msr, data); 3610 case MSR_IA32_TSC_DEADLINE: 3611 kvm_set_lapic_tscdeadline_msr(vcpu, data); 3612 break; 3613 case MSR_IA32_TSC_ADJUST: 3614 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) { 3615 if (!msr_info->host_initiated) { 3616 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; 3617 adjust_tsc_offset_guest(vcpu, adj); 3618 /* Before back to guest, tsc_timestamp must be adjusted 3619 * as well, otherwise guest's percpu pvclock time could jump. 3620 */ 3621 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3622 } 3623 vcpu->arch.ia32_tsc_adjust_msr = data; 3624 } 3625 break; 3626 case MSR_IA32_MISC_ENABLE: { 3627 u64 old_val = vcpu->arch.ia32_misc_enable_msr; 3628 3629 if (!msr_info->host_initiated) { 3630 /* RO bits */ 3631 if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK) 3632 return 1; 3633 3634 /* R bits, i.e. writes are ignored, but don't fault. 
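 * For example, MSR_IA32_MISC_ENABLE_EMON below: the write is
 * accepted, but the bit silently keeps its previous value.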
*/ 3635 data = data & ~MSR_IA32_MISC_ENABLE_EMON; 3636 data |= old_val & MSR_IA32_MISC_ENABLE_EMON; 3637 } 3638 3639 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && 3640 ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { 3641 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3)) 3642 return 1; 3643 vcpu->arch.ia32_misc_enable_msr = data; 3644 kvm_update_cpuid_runtime(vcpu); 3645 } else { 3646 vcpu->arch.ia32_misc_enable_msr = data; 3647 } 3648 break; 3649 } 3650 case MSR_IA32_SMBASE: 3651 if (!msr_info->host_initiated) 3652 return 1; 3653 vcpu->arch.smbase = data; 3654 break; 3655 case MSR_IA32_POWER_CTL: 3656 vcpu->arch.msr_ia32_power_ctl = data; 3657 break; 3658 case MSR_IA32_TSC: 3659 if (msr_info->host_initiated) { 3660 kvm_synchronize_tsc(vcpu, data); 3661 } else { 3662 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; 3663 adjust_tsc_offset_guest(vcpu, adj); 3664 vcpu->arch.ia32_tsc_adjust_msr += adj; 3665 } 3666 break; 3667 case MSR_IA32_XSS: 3668 if (!msr_info->host_initiated && 3669 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 3670 return 1; 3671 /* 3672 * KVM supports exposing PT to the guest, but does not support 3673 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than 3674 * XSAVES/XRSTORS to save/restore PT MSRs. 3675 */ 3676 if (data & ~kvm_caps.supported_xss) 3677 return 1; 3678 vcpu->arch.ia32_xss = data; 3679 kvm_update_cpuid_runtime(vcpu); 3680 break; 3681 case MSR_SMI_COUNT: 3682 if (!msr_info->host_initiated) 3683 return 1; 3684 vcpu->arch.smi_count = data; 3685 break; 3686 case MSR_KVM_WALL_CLOCK_NEW: 3687 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3688 return 1; 3689 3690 vcpu->kvm->arch.wall_clock = data; 3691 kvm_write_wall_clock(vcpu->kvm, data, 0); 3692 break; 3693 case MSR_KVM_WALL_CLOCK: 3694 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3695 return 1; 3696 3697 vcpu->kvm->arch.wall_clock = data; 3698 kvm_write_wall_clock(vcpu->kvm, data, 0); 3699 break; 3700 case MSR_KVM_SYSTEM_TIME_NEW: 3701 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3702 return 1; 3703 3704 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); 3705 break; 3706 case MSR_KVM_SYSTEM_TIME: 3707 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3708 return 1; 3709 3710 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); 3711 break; 3712 case MSR_KVM_ASYNC_PF_EN: 3713 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 3714 return 1; 3715 3716 if (kvm_pv_enable_async_pf(vcpu, data)) 3717 return 1; 3718 break; 3719 case MSR_KVM_ASYNC_PF_INT: 3720 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3721 return 1; 3722 3723 if (kvm_pv_enable_async_pf_int(vcpu, data)) 3724 return 1; 3725 break; 3726 case MSR_KVM_ASYNC_PF_ACK: 3727 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3728 return 1; 3729 if (data & 0x1) { 3730 vcpu->arch.apf.pageready_pending = false; 3731 kvm_check_async_pf_completion(vcpu); 3732 } 3733 break; 3734 case MSR_KVM_STEAL_TIME: 3735 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 3736 return 1; 3737 3738 if (unlikely(!sched_info_on())) 3739 return 1; 3740 3741 if (data & KVM_STEAL_RESERVED_MASK) 3742 return 1; 3743 3744 vcpu->arch.st.msr_val = data; 3745 3746 if (!(data & KVM_MSR_ENABLED)) 3747 break; 3748 3749 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 3750 3751 break; 3752 case MSR_KVM_PV_EOI_EN: 3753 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 3754 return 1; 3755 3756 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8))) 3757 return 1; 3758 break; 3759 3760 case 
MSR_KVM_POLL_CONTROL: 3761 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 3762 return 1; 3763 3764 /* only enable bit supported */ 3765 if (data & (-1ULL << 1)) 3766 return 1; 3767 3768 vcpu->arch.msr_kvm_poll_control = data; 3769 break; 3770 3771 case MSR_IA32_MCG_CTL: 3772 case MSR_IA32_MCG_STATUS: 3773 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3774 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3775 return set_msr_mce(vcpu, msr_info); 3776 3777 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3778 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3779 pr = true; 3780 fallthrough; 3781 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3782 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3783 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3784 return kvm_pmu_set_msr(vcpu, msr_info); 3785 3786 if (pr || data != 0) 3787 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " 3788 "0x%x data 0x%llx\n", msr, data); 3789 break; 3790 case MSR_K7_CLK_CTL: 3791 /* 3792 * Ignore all writes to this no longer documented MSR. 3793 * Writes are only relevant for old K7 processors, 3794 * all pre-dating SVM, but a recommended workaround from 3795 * AMD for these chips. It is possible to specify the 3796 * affected processor models on the command line, hence 3797 * the need to ignore the workaround. 3798 */ 3799 break; 3800 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 3801 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 3802 case HV_X64_MSR_SYNDBG_OPTIONS: 3803 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 3804 case HV_X64_MSR_CRASH_CTL: 3805 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 3806 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 3807 case HV_X64_MSR_TSC_EMULATION_CONTROL: 3808 case HV_X64_MSR_TSC_EMULATION_STATUS: 3809 return kvm_hv_set_msr_common(vcpu, msr, data, 3810 msr_info->host_initiated); 3811 case MSR_IA32_BBL_CR_CTL3: 3812 /* Drop writes to this legacy MSR -- see rdmsr 3813 * counterpart for further detail. 
3814 */ 3815 if (report_ignored_msrs) 3816 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", 3817 msr, data); 3818 break; 3819 case MSR_AMD64_OSVW_ID_LENGTH: 3820 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3821 return 1; 3822 vcpu->arch.osvw.length = data; 3823 break; 3824 case MSR_AMD64_OSVW_STATUS: 3825 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3826 return 1; 3827 vcpu->arch.osvw.status = data; 3828 break; 3829 case MSR_PLATFORM_INFO: 3830 if (!msr_info->host_initiated || 3831 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) && 3832 cpuid_fault_enabled(vcpu))) 3833 return 1; 3834 vcpu->arch.msr_platform_info = data; 3835 break; 3836 case MSR_MISC_FEATURES_ENABLES: 3837 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT || 3838 (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT && 3839 !supports_cpuid_fault(vcpu))) 3840 return 1; 3841 vcpu->arch.msr_misc_features_enables = data; 3842 break; 3843 #ifdef CONFIG_X86_64 3844 case MSR_IA32_XFD: 3845 if (!msr_info->host_initiated && 3846 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 3847 return 1; 3848 3849 if (data & ~kvm_guest_supported_xfd(vcpu)) 3850 return 1; 3851 3852 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); 3853 break; 3854 case MSR_IA32_XFD_ERR: 3855 if (!msr_info->host_initiated && 3856 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 3857 return 1; 3858 3859 if (data & ~kvm_guest_supported_xfd(vcpu)) 3860 return 1; 3861 3862 vcpu->arch.guest_fpu.xfd_err = data; 3863 break; 3864 #endif 3865 case MSR_IA32_PEBS_ENABLE: 3866 case MSR_IA32_DS_AREA: 3867 case MSR_PEBS_DATA_CFG: 3868 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: 3869 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3870 return kvm_pmu_set_msr(vcpu, msr_info); 3871 /* 3872 * Userspace is allowed to write '0' to MSRs that KVM reports 3873 * as to-be-saved, even if an MSR isn't fully supported. 3874 */ 3875 return !msr_info->host_initiated || data; 3876 default: 3877 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3878 return kvm_pmu_set_msr(vcpu, msr_info); 3879 return KVM_MSR_RET_INVALID; 3880 } 3881 return 0; 3882 } 3883 EXPORT_SYMBOL_GPL(kvm_set_msr_common); 3884 3885 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) 3886 { 3887 u64 data; 3888 u64 mcg_cap = vcpu->arch.mcg_cap; 3889 unsigned bank_num = mcg_cap & 0xff; 3890 u32 offset, last_msr; 3891 3892 switch (msr) { 3893 case MSR_IA32_P5_MC_ADDR: 3894 case MSR_IA32_P5_MC_TYPE: 3895 data = 0; 3896 break; 3897 case MSR_IA32_MCG_CAP: 3898 data = vcpu->arch.mcg_cap; 3899 break; 3900 case MSR_IA32_MCG_CTL: 3901 if (!(mcg_cap & MCG_CTL_P) && !host) 3902 return 1; 3903 data = vcpu->arch.mcg_ctl; 3904 break; 3905 case MSR_IA32_MCG_STATUS: 3906 data = vcpu->arch.mcg_status; 3907 break; 3908 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3909 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; 3910 if (msr > last_msr) 3911 return 1; 3912 3913 if (!(mcg_cap & MCG_CMCI_P) && !host) 3914 return 1; 3915 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, 3916 last_msr + 1 - MSR_IA32_MC0_CTL2); 3917 data = vcpu->arch.mci_ctl2_banks[offset]; 3918 break; 3919 case MSR_IA32_MC0_CTL ...
MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3920 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; 3921 if (msr > last_msr) 3922 return 1; 3923 3924 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, 3925 last_msr + 1 - MSR_IA32_MC0_CTL); 3926 data = vcpu->arch.mce_banks[offset]; 3927 break; 3928 default: 3929 return 1; 3930 } 3931 *pdata = data; 3932 return 0; 3933 } 3934 3935 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3936 { 3937 switch (msr_info->index) { 3938 case MSR_IA32_PLATFORM_ID: 3939 case MSR_IA32_EBL_CR_POWERON: 3940 case MSR_IA32_LASTBRANCHFROMIP: 3941 case MSR_IA32_LASTBRANCHTOIP: 3942 case MSR_IA32_LASTINTFROMIP: 3943 case MSR_IA32_LASTINTTOIP: 3944 case MSR_AMD64_SYSCFG: 3945 case MSR_K8_TSEG_ADDR: 3946 case MSR_K8_TSEG_MASK: 3947 case MSR_VM_HSAVE_PA: 3948 case MSR_K8_INT_PENDING_MSG: 3949 case MSR_AMD64_NB_CFG: 3950 case MSR_FAM10H_MMIO_CONF_BASE: 3951 case MSR_AMD64_BU_CFG2: 3952 case MSR_IA32_PERF_CTL: 3953 case MSR_AMD64_DC_CFG: 3954 case MSR_F15H_EX_CFG: 3955 /* 3956 * Intel Sandy Bridge CPUs must support the RAPL (running average power 3957 * limit) MSRs. Just return 0, as we do not want to expose the host 3958 * data here. Do not conditionalize this on CPUID, as KVM does not do 3959 * so for existing CPU-specific MSRs. 3960 */ 3961 case MSR_RAPL_POWER_UNIT: 3962 case MSR_PP0_ENERGY_STATUS: /* Power plane 0 (core) */ 3963 case MSR_PP1_ENERGY_STATUS: /* Power plane 1 (graphics uncore) */ 3964 case MSR_PKG_ENERGY_STATUS: /* Total package */ 3965 case MSR_DRAM_ENERGY_STATUS: /* DRAM controller */ 3966 msr_info->data = 0; 3967 break; 3968 case MSR_IA32_PEBS_ENABLE: 3969 case MSR_IA32_DS_AREA: 3970 case MSR_PEBS_DATA_CFG: 3971 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: 3972 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3973 return kvm_pmu_get_msr(vcpu, msr_info); 3974 /* 3975 * Userspace is allowed to read MSRs that KVM reports as 3976 * to-be-saved, even if an MSR isn't fully supported. 3977 */ 3978 if (!msr_info->host_initiated) 3979 return 1; 3980 msr_info->data = 0; 3981 break; 3982 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3983 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3984 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3985 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3986 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3987 return kvm_pmu_get_msr(vcpu, msr_info); 3988 msr_info->data = 0; 3989 break; 3990 case MSR_IA32_UCODE_REV: 3991 msr_info->data = vcpu->arch.microcode_version; 3992 break; 3993 case MSR_IA32_ARCH_CAPABILITIES: 3994 if (!msr_info->host_initiated && 3995 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) 3996 return 1; 3997 msr_info->data = vcpu->arch.arch_capabilities; 3998 break; 3999 case MSR_IA32_PERF_CAPABILITIES: 4000 if (!msr_info->host_initiated && 4001 !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) 4002 return 1; 4003 msr_info->data = vcpu->arch.perf_capabilities; 4004 break; 4005 case MSR_IA32_POWER_CTL: 4006 msr_info->data = vcpu->arch.msr_ia32_power_ctl; 4007 break; 4008 case MSR_IA32_TSC: { 4009 /* 4010 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset 4011 * even when not intercepted. AMD manual doesn't explicitly 4012 * state this but appears to behave the same. 4013 * 4014 * On userspace reads and writes, however, we unconditionally 4015 * return L1's TSC value to ensure backwards-compatible 4016 * behavior for migration. 
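 * Concretely, the value computed below is
 *
 *	guest_tsc = host_tsc * tsc_scaling_ratio + tsc_offset
 *
 * (via kvm_scale_tsc()), using the L1 pair l1_tsc_offset /
 * l1_tsc_scaling_ratio for host-initiated accesses and the current,
 * possibly L2, pair for reads from the guest.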
4017 */ 4018 u64 offset, ratio; 4019 4020 if (msr_info->host_initiated) { 4021 offset = vcpu->arch.l1_tsc_offset; 4022 ratio = vcpu->arch.l1_tsc_scaling_ratio; 4023 } else { 4024 offset = vcpu->arch.tsc_offset; 4025 ratio = vcpu->arch.tsc_scaling_ratio; 4026 } 4027 4028 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset; 4029 break; 4030 } 4031 case MSR_MTRRcap: 4032 case 0x200 ... MSR_IA32_MC0_CTL2 - 1: 4033 case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff: 4034 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); 4035 case 0xcd: /* fsb frequency */ 4036 msr_info->data = 3; 4037 break; 4038 /* 4039 * MSR_EBC_FREQUENCY_ID 4040 * Conservative value valid for even the basic CPU models. 4041 * Models 0,1: 000 in bits 23:21 indicating a bus speed of 4042 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, 4043 * and 266MHz for model 3, or 4. Set Core Clock 4044 * Frequency to System Bus Frequency Ratio to 1 (bits 4045 * 31:24) even though these are only valid for CPU 4046 * models > 2, however guests may end up dividing or 4047 * multiplying by zero otherwise. 4048 */ 4049 case MSR_EBC_FREQUENCY_ID: 4050 msr_info->data = 1 << 24; 4051 break; 4052 case MSR_IA32_APICBASE: 4053 msr_info->data = kvm_get_apic_base(vcpu); 4054 break; 4055 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: 4056 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); 4057 case MSR_IA32_TSC_DEADLINE: 4058 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); 4059 break; 4060 case MSR_IA32_TSC_ADJUST: 4061 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; 4062 break; 4063 case MSR_IA32_MISC_ENABLE: 4064 msr_info->data = vcpu->arch.ia32_misc_enable_msr; 4065 break; 4066 case MSR_IA32_SMBASE: 4067 if (!msr_info->host_initiated) 4068 return 1; 4069 msr_info->data = vcpu->arch.smbase; 4070 break; 4071 case MSR_SMI_COUNT: 4072 msr_info->data = vcpu->arch.smi_count; 4073 break; 4074 case MSR_IA32_PERF_STATUS: 4075 /* TSC increment by tick */ 4076 msr_info->data = 1000ULL; 4077 /* CPU multiplier */ 4078 msr_info->data |= (((uint64_t)4ULL) << 40); 4079 break; 4080 case MSR_EFER: 4081 msr_info->data = vcpu->arch.efer; 4082 break; 4083 case MSR_KVM_WALL_CLOCK: 4084 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4085 return 1; 4086 4087 msr_info->data = vcpu->kvm->arch.wall_clock; 4088 break; 4089 case MSR_KVM_WALL_CLOCK_NEW: 4090 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4091 return 1; 4092 4093 msr_info->data = vcpu->kvm->arch.wall_clock; 4094 break; 4095 case MSR_KVM_SYSTEM_TIME: 4096 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4097 return 1; 4098 4099 msr_info->data = vcpu->arch.time; 4100 break; 4101 case MSR_KVM_SYSTEM_TIME_NEW: 4102 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4103 return 1; 4104 4105 msr_info->data = vcpu->arch.time; 4106 break; 4107 case MSR_KVM_ASYNC_PF_EN: 4108 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 4109 return 1; 4110 4111 msr_info->data = vcpu->arch.apf.msr_en_val; 4112 break; 4113 case MSR_KVM_ASYNC_PF_INT: 4114 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4115 return 1; 4116 4117 msr_info->data = vcpu->arch.apf.msr_int_val; 4118 break; 4119 case MSR_KVM_ASYNC_PF_ACK: 4120 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4121 return 1; 4122 4123 msr_info->data = 0; 4124 break; 4125 case MSR_KVM_STEAL_TIME: 4126 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 4127 return 1; 4128 4129 msr_info->data = vcpu->arch.st.msr_val; 4130 break; 4131 case MSR_KVM_PV_EOI_EN: 4132 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 4133 return 1; 
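/*
 * For reference, a guest enables PV EOI by writing the physical
 * address of a byte-sized flag, with bit 0 as the enable bit, to
 * this MSR. A minimal guest-side sketch (variable name hypothetical,
 * mirroring what a Linux guest does):
 *
 *	static u8 kvm_eoi_flag;
 *	wrmsrl(MSR_KVM_PV_EOI_EN, __pa(&kvm_eoi_flag) | KVM_MSR_ENABLED);
 */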
4134 4135 msr_info->data = vcpu->arch.pv_eoi.msr_val; 4136 break; 4137 case MSR_KVM_POLL_CONTROL: 4138 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 4139 return 1; 4140 4141 msr_info->data = vcpu->arch.msr_kvm_poll_control; 4142 break; 4143 case MSR_IA32_P5_MC_ADDR: 4144 case MSR_IA32_P5_MC_TYPE: 4145 case MSR_IA32_MCG_CAP: 4146 case MSR_IA32_MCG_CTL: 4147 case MSR_IA32_MCG_STATUS: 4148 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 4149 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 4150 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, 4151 msr_info->host_initiated); 4152 case MSR_IA32_XSS: 4153 if (!msr_info->host_initiated && 4154 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 4155 return 1; 4156 msr_info->data = vcpu->arch.ia32_xss; 4157 break; 4158 case MSR_K7_CLK_CTL: 4159 /* 4160 * Provide expected ramp-up count for K7. All others 4161 * are set to zero, indicating minimum divisors for 4162 * every field. 4163 * 4164 * This prevents guest kernels on an AMD host with CPU 4165 * type 6, model 8 and higher from exploding due to 4166 * the rdmsr failing. 4167 */ 4168 msr_info->data = 0x20000000; 4169 break; 4170 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 4171 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 4172 case HV_X64_MSR_SYNDBG_OPTIONS: 4173 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 4174 case HV_X64_MSR_CRASH_CTL: 4175 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 4176 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 4177 case HV_X64_MSR_TSC_EMULATION_CONTROL: 4178 case HV_X64_MSR_TSC_EMULATION_STATUS: 4179 return kvm_hv_get_msr_common(vcpu, 4180 msr_info->index, &msr_info->data, 4181 msr_info->host_initiated); 4182 case MSR_IA32_BBL_CR_CTL3: 4183 /* This legacy MSR exists but isn't fully documented in current 4184 * silicon. It is however accessed by winxp in very narrow 4185 * scenarios where it sets bit #19, itself documented as 4186 * a "reserved" bit.
Best effort attempt to source coherent 4187 * read data here should the balance of the register be 4188 * interpreted by the guest: 4189 * 4190 * L2 cache control register 3: 64GB range, 256KB size, 4191 * enabled, latency 0x1, configured 4192 */ 4193 msr_info->data = 0xbe702111; 4194 break; 4195 case MSR_AMD64_OSVW_ID_LENGTH: 4196 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4197 return 1; 4198 msr_info->data = vcpu->arch.osvw.length; 4199 break; 4200 case MSR_AMD64_OSVW_STATUS: 4201 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4202 return 1; 4203 msr_info->data = vcpu->arch.osvw.status; 4204 break; 4205 case MSR_PLATFORM_INFO: 4206 if (!msr_info->host_initiated && 4207 !vcpu->kvm->arch.guest_can_read_msr_platform_info) 4208 return 1; 4209 msr_info->data = vcpu->arch.msr_platform_info; 4210 break; 4211 case MSR_MISC_FEATURES_ENABLES: 4212 msr_info->data = vcpu->arch.msr_misc_features_enables; 4213 break; 4214 case MSR_K7_HWCR: 4215 msr_info->data = vcpu->arch.msr_hwcr; 4216 break; 4217 #ifdef CONFIG_X86_64 4218 case MSR_IA32_XFD: 4219 if (!msr_info->host_initiated && 4220 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4221 return 1; 4222 4223 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; 4224 break; 4225 case MSR_IA32_XFD_ERR: 4226 if (!msr_info->host_initiated && 4227 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4228 return 1; 4229 4230 msr_info->data = vcpu->arch.guest_fpu.xfd_err; 4231 break; 4232 #endif 4233 default: 4234 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 4235 return kvm_pmu_get_msr(vcpu, msr_info); 4236 return KVM_MSR_RET_INVALID; 4237 } 4238 return 0; 4239 } 4240 EXPORT_SYMBOL_GPL(kvm_get_msr_common); 4241 4242 /* 4243 * Read or write a bunch of msrs. All parameters are kernel addresses. 4244 * 4245 * @return number of msrs processed successfully. 4246 */ 4247 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, 4248 struct kvm_msr_entry *entries, 4249 int (*do_msr)(struct kvm_vcpu *vcpu, 4250 unsigned index, u64 *data)) 4251 { 4252 int i; 4253 4254 for (i = 0; i < msrs->nmsrs; ++i) 4255 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 4256 break; 4257 4258 return i; 4259 } 4260 4261 /* 4262 * Read or write a bunch of msrs. Parameters are user addresses. 4263 * 4264 * @return number of msrs processed successfully.
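 *
 * For illustration, the matching userspace side is a single ioctl; a
 * minimal sketch ("vcpu_fd" hypothetical, error handling omitted):
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } m = {
 *		.hdr = { .nmsrs = 1 },
 *		.e[0] = { .index = MSR_IA32_MISC_ENABLE },
 *	};
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &m);
 *
 * On success n is 1 and m.e[0].data holds the MSR value.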
4265 */ 4266 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, 4267 int (*do_msr)(struct kvm_vcpu *vcpu, 4268 unsigned index, u64 *data), 4269 int writeback) 4270 { 4271 struct kvm_msrs msrs; 4272 struct kvm_msr_entry *entries; 4273 int r, n; 4274 unsigned size; 4275 4276 r = -EFAULT; 4277 if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) 4278 goto out; 4279 4280 r = -E2BIG; 4281 if (msrs.nmsrs >= MAX_IO_MSRS) 4282 goto out; 4283 4284 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; 4285 entries = memdup_user(user_msrs->entries, size); 4286 if (IS_ERR(entries)) { 4287 r = PTR_ERR(entries); 4288 goto out; 4289 } 4290 4291 r = n = __msr_io(vcpu, &msrs, entries, do_msr); 4292 if (r < 0) 4293 goto out_free; 4294 4295 r = -EFAULT; 4296 if (writeback && copy_to_user(user_msrs->entries, entries, size)) 4297 goto out_free; 4298 4299 r = n; 4300 4301 out_free: 4302 kfree(entries); 4303 out: 4304 return r; 4305 } 4306 4307 static inline bool kvm_can_mwait_in_guest(void) 4308 { 4309 return boot_cpu_has(X86_FEATURE_MWAIT) && 4310 !boot_cpu_has_bug(X86_BUG_MONITOR) && 4311 boot_cpu_has(X86_FEATURE_ARAT); 4312 } 4313 4314 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, 4315 struct kvm_cpuid2 __user *cpuid_arg) 4316 { 4317 struct kvm_cpuid2 cpuid; 4318 int r; 4319 4320 r = -EFAULT; 4321 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4322 return r; 4323 4324 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); 4325 if (r) 4326 return r; 4327 4328 r = -EFAULT; 4329 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 4330 return r; 4331 4332 return 0; 4333 } 4334 4335 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 4336 { 4337 int r = 0; 4338 4339 switch (ext) { 4340 case KVM_CAP_IRQCHIP: 4341 case KVM_CAP_HLT: 4342 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 4343 case KVM_CAP_SET_TSS_ADDR: 4344 case KVM_CAP_EXT_CPUID: 4345 case KVM_CAP_EXT_EMUL_CPUID: 4346 case KVM_CAP_CLOCKSOURCE: 4347 case KVM_CAP_PIT: 4348 case KVM_CAP_NOP_IO_DELAY: 4349 case KVM_CAP_MP_STATE: 4350 case KVM_CAP_SYNC_MMU: 4351 case KVM_CAP_USER_NMI: 4352 case KVM_CAP_REINJECT_CONTROL: 4353 case KVM_CAP_IRQ_INJECT_STATUS: 4354 case KVM_CAP_IOEVENTFD: 4355 case KVM_CAP_IOEVENTFD_NO_LENGTH: 4356 case KVM_CAP_PIT2: 4357 case KVM_CAP_PIT_STATE2: 4358 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 4359 case KVM_CAP_VCPU_EVENTS: 4360 case KVM_CAP_HYPERV: 4361 case KVM_CAP_HYPERV_VAPIC: 4362 case KVM_CAP_HYPERV_SPIN: 4363 case KVM_CAP_HYPERV_SYNIC: 4364 case KVM_CAP_HYPERV_SYNIC2: 4365 case KVM_CAP_HYPERV_VP_INDEX: 4366 case KVM_CAP_HYPERV_EVENTFD: 4367 case KVM_CAP_HYPERV_TLBFLUSH: 4368 case KVM_CAP_HYPERV_SEND_IPI: 4369 case KVM_CAP_HYPERV_CPUID: 4370 case KVM_CAP_HYPERV_ENFORCE_CPUID: 4371 case KVM_CAP_SYS_HYPERV_CPUID: 4372 case KVM_CAP_PCI_SEGMENT: 4373 case KVM_CAP_DEBUGREGS: 4374 case KVM_CAP_X86_ROBUST_SINGLESTEP: 4375 case KVM_CAP_XSAVE: 4376 case KVM_CAP_ASYNC_PF: 4377 case KVM_CAP_ASYNC_PF_INT: 4378 case KVM_CAP_GET_TSC_KHZ: 4379 case KVM_CAP_KVMCLOCK_CTRL: 4380 case KVM_CAP_READONLY_MEM: 4381 case KVM_CAP_HYPERV_TIME: 4382 case KVM_CAP_IOAPIC_POLARITY_IGNORED: 4383 case KVM_CAP_TSC_DEADLINE_TIMER: 4384 case KVM_CAP_DISABLE_QUIRKS: 4385 case KVM_CAP_SET_BOOT_CPU_ID: 4386 case KVM_CAP_SPLIT_IRQCHIP: 4387 case KVM_CAP_IMMEDIATE_EXIT: 4388 case KVM_CAP_PMU_EVENT_FILTER: 4389 case KVM_CAP_GET_MSR_FEATURES: 4390 case KVM_CAP_MSR_PLATFORM_INFO: 4391 case KVM_CAP_EXCEPTION_PAYLOAD: 4392 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 4393 case KVM_CAP_SET_GUEST_DEBUG: 4394 case KVM_CAP_LAST_CPU: 
4395 case KVM_CAP_X86_USER_SPACE_MSR: 4396 case KVM_CAP_X86_MSR_FILTER: 4397 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 4398 #ifdef CONFIG_X86_SGX_KVM 4399 case KVM_CAP_SGX_ATTRIBUTE: 4400 #endif 4401 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 4402 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 4403 case KVM_CAP_SREGS2: 4404 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 4405 case KVM_CAP_VCPU_ATTRIBUTES: 4406 case KVM_CAP_SYS_ATTRIBUTES: 4407 case KVM_CAP_VAPIC: 4408 case KVM_CAP_ENABLE_CAP: 4409 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 4410 r = 1; 4411 break; 4412 case KVM_CAP_EXIT_HYPERCALL: 4413 r = KVM_EXIT_HYPERCALL_VALID_MASK; 4414 break; 4415 case KVM_CAP_SET_GUEST_DEBUG2: 4416 return KVM_GUESTDBG_VALID_MASK; 4417 #ifdef CONFIG_KVM_XEN 4418 case KVM_CAP_XEN_HVM: 4419 r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | 4420 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | 4421 KVM_XEN_HVM_CONFIG_SHARED_INFO | 4422 KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL | 4423 KVM_XEN_HVM_CONFIG_EVTCHN_SEND; 4424 if (sched_info_on()) 4425 r |= KVM_XEN_HVM_CONFIG_RUNSTATE; 4426 break; 4427 #endif 4428 case KVM_CAP_SYNC_REGS: 4429 r = KVM_SYNC_X86_VALID_FIELDS; 4430 break; 4431 case KVM_CAP_ADJUST_CLOCK: 4432 r = KVM_CLOCK_VALID_FLAGS; 4433 break; 4434 case KVM_CAP_X86_DISABLE_EXITS: 4435 r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE | 4436 KVM_X86_DISABLE_EXITS_CSTATE; 4437 if (kvm_can_mwait_in_guest()) 4438 r |= KVM_X86_DISABLE_EXITS_MWAIT; 4439 break; 4440 case KVM_CAP_X86_SMM: 4441 /* SMBASE is usually relocated above 1M on modern chipsets, 4442 * and SMM handlers might indeed rely on 4G segment limits, 4443 * so do not report SMM to be available if real mode is 4444 * emulated via vm86 mode. Still, do not go to great lengths 4445 * to avoid userspace's usage of the feature, because it is a 4446 * fringe case that is not enabled except via specific settings 4447 * of the module parameters. 4448 */ 4449 r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); 4450 break; 4451 case KVM_CAP_NR_VCPUS: 4452 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); 4453 break; 4454 case KVM_CAP_MAX_VCPUS: 4455 r = KVM_MAX_VCPUS; 4456 break; 4457 case KVM_CAP_MAX_VCPU_ID: 4458 r = KVM_MAX_VCPU_IDS; 4459 break; 4460 case KVM_CAP_PV_MMU: /* obsolete */ 4461 r = 0; 4462 break; 4463 case KVM_CAP_MCE: 4464 r = KVM_MAX_MCE_BANKS; 4465 break; 4466 case KVM_CAP_XCRS: 4467 r = boot_cpu_has(X86_FEATURE_XSAVE); 4468 break; 4469 case KVM_CAP_TSC_CONTROL: 4470 case KVM_CAP_VM_TSC_CONTROL: 4471 r = kvm_caps.has_tsc_control; 4472 break; 4473 case KVM_CAP_X2APIC_API: 4474 r = KVM_X2APIC_API_VALID_FLAGS; 4475 break; 4476 case KVM_CAP_NESTED_STATE: 4477 r = kvm_x86_ops.nested_ops->get_state ?
4478 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; 4479 break; 4480 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 4481 r = kvm_x86_ops.enable_direct_tlbflush != NULL; 4482 break; 4483 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 4484 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; 4485 break; 4486 case KVM_CAP_SMALLER_MAXPHYADDR: 4487 r = (int) allow_smaller_maxphyaddr; 4488 break; 4489 case KVM_CAP_STEAL_TIME: 4490 r = sched_info_on(); 4491 break; 4492 case KVM_CAP_X86_BUS_LOCK_EXIT: 4493 if (kvm_caps.has_bus_lock_exit) 4494 r = KVM_BUS_LOCK_DETECTION_OFF | 4495 KVM_BUS_LOCK_DETECTION_EXIT; 4496 else 4497 r = 0; 4498 break; 4499 case KVM_CAP_XSAVE2: { 4500 u64 guest_perm = xstate_get_guest_group_perm(); 4501 4502 r = xstate_required_size(kvm_caps.supported_xcr0 & guest_perm, false); 4503 if (r < sizeof(struct kvm_xsave)) 4504 r = sizeof(struct kvm_xsave); 4505 break; 4506 } 4507 case KVM_CAP_PMU_CAPABILITY: 4508 r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0; 4509 break; 4510 case KVM_CAP_DISABLE_QUIRKS2: 4511 r = KVM_X86_VALID_QUIRKS; 4512 break; 4513 case KVM_CAP_X86_NOTIFY_VMEXIT: 4514 r = kvm_caps.has_notify_vmexit; 4515 break; 4516 default: 4517 break; 4518 } 4519 return r; 4520 } 4521 4522 static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr) 4523 { 4524 void __user *uaddr = (void __user*)(unsigned long)attr->addr; 4525 4526 if ((u64)(unsigned long)uaddr != attr->addr) 4527 return ERR_PTR_USR(-EFAULT); 4528 return uaddr; 4529 } 4530 4531 static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr) 4532 { 4533 u64 __user *uaddr = kvm_get_attr_addr(attr); 4534 4535 if (attr->group) 4536 return -ENXIO; 4537 4538 if (IS_ERR(uaddr)) 4539 return PTR_ERR(uaddr); 4540 4541 switch (attr->attr) { 4542 case KVM_X86_XCOMP_GUEST_SUPP: 4543 if (put_user(kvm_caps.supported_xcr0, uaddr)) 4544 return -EFAULT; 4545 return 0; 4546 default: 4547 return -ENXIO; 4548 break; 4549 } 4550 } 4551 4552 static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr) 4553 { 4554 if (attr->group) 4555 return -ENXIO; 4556 4557 switch (attr->attr) { 4558 case KVM_X86_XCOMP_GUEST_SUPP: 4559 return 0; 4560 default: 4561 return -ENXIO; 4562 } 4563 } 4564 4565 long kvm_arch_dev_ioctl(struct file *filp, 4566 unsigned int ioctl, unsigned long arg) 4567 { 4568 void __user *argp = (void __user *)arg; 4569 long r; 4570 4571 switch (ioctl) { 4572 case KVM_GET_MSR_INDEX_LIST: { 4573 struct kvm_msr_list __user *user_msr_list = argp; 4574 struct kvm_msr_list msr_list; 4575 unsigned n; 4576 4577 r = -EFAULT; 4578 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4579 goto out; 4580 n = msr_list.nmsrs; 4581 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; 4582 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4583 goto out; 4584 r = -E2BIG; 4585 if (n < msr_list.nmsrs) 4586 goto out; 4587 r = -EFAULT; 4588 if (copy_to_user(user_msr_list->indices, &msrs_to_save, 4589 num_msrs_to_save * sizeof(u32))) 4590 goto out; 4591 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, 4592 &emulated_msrs, 4593 num_emulated_msrs * sizeof(u32))) 4594 goto out; 4595 r = 0; 4596 break; 4597 } 4598 case KVM_GET_SUPPORTED_CPUID: 4599 case KVM_GET_EMULATED_CPUID: { 4600 struct kvm_cpuid2 __user *cpuid_arg = argp; 4601 struct kvm_cpuid2 cpuid; 4602 4603 r = -EFAULT; 4604 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4605 goto out; 4606 4607 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, 4608 ioctl); 4609 if (r) 4610 goto out; 4611 4612 r = -EFAULT; 4613 if (copy_to_user(cpuid_arg, 
&cpuid, sizeof(cpuid))) 4614 goto out; 4615 r = 0; 4616 break; 4617 } 4618 case KVM_X86_GET_MCE_CAP_SUPPORTED: 4619 r = -EFAULT; 4620 if (copy_to_user(argp, &kvm_caps.supported_mce_cap, 4621 sizeof(kvm_caps.supported_mce_cap))) 4622 goto out; 4623 r = 0; 4624 break; 4625 case KVM_GET_MSR_FEATURE_INDEX_LIST: { 4626 struct kvm_msr_list __user *user_msr_list = argp; 4627 struct kvm_msr_list msr_list; 4628 unsigned int n; 4629 4630 r = -EFAULT; 4631 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4632 goto out; 4633 n = msr_list.nmsrs; 4634 msr_list.nmsrs = num_msr_based_features; 4635 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4636 goto out; 4637 r = -E2BIG; 4638 if (n < msr_list.nmsrs) 4639 goto out; 4640 r = -EFAULT; 4641 if (copy_to_user(user_msr_list->indices, &msr_based_features, 4642 num_msr_based_features * sizeof(u32))) 4643 goto out; 4644 r = 0; 4645 break; 4646 } 4647 case KVM_GET_MSRS: 4648 r = msr_io(NULL, argp, do_get_msr_feature, 1); 4649 break; 4650 case KVM_GET_SUPPORTED_HV_CPUID: 4651 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp); 4652 break; 4653 case KVM_GET_DEVICE_ATTR: { 4654 struct kvm_device_attr attr; 4655 r = -EFAULT; 4656 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4657 break; 4658 r = kvm_x86_dev_get_attr(&attr); 4659 break; 4660 } 4661 case KVM_HAS_DEVICE_ATTR: { 4662 struct kvm_device_attr attr; 4663 r = -EFAULT; 4664 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4665 break; 4666 r = kvm_x86_dev_has_attr(&attr); 4667 break; 4668 } 4669 default: 4670 r = -EINVAL; 4671 break; 4672 } 4673 out: 4674 return r; 4675 } 4676 4677 static void wbinvd_ipi(void *garbage) 4678 { 4679 wbinvd(); 4680 } 4681 4682 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) 4683 { 4684 return kvm_arch_has_noncoherent_dma(vcpu->kvm); 4685 } 4686 4687 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 4688 { 4689 /* Address WBINVD may be executed by guest */ 4690 if (need_emulate_wbinvd(vcpu)) { 4691 if (static_call(kvm_x86_has_wbinvd_exit)()) 4692 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 4693 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) 4694 smp_call_function_single(vcpu->cpu, 4695 wbinvd_ipi, NULL, 1); 4696 } 4697 4698 static_call(kvm_x86_vcpu_load)(vcpu, cpu); 4699 4700 /* Save host pkru register if supported */ 4701 vcpu->arch.host_pkru = read_pkru(); 4702 4703 /* Apply any externally detected TSC adjustments (due to suspend) */ 4704 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { 4705 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); 4706 vcpu->arch.tsc_offset_adjustment = 0; 4707 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 4708 } 4709 4710 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { 4711 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : 4712 rdtsc() - vcpu->arch.last_host_tsc; 4713 if (tsc_delta < 0) 4714 mark_tsc_unstable("KVM discovered backwards TSC"); 4715 4716 if (kvm_check_tsc_unstable()) { 4717 u64 offset = kvm_compute_l1_tsc_offset(vcpu, 4718 vcpu->arch.last_guest_tsc); 4719 kvm_vcpu_write_tsc_offset(vcpu, offset); 4720 vcpu->arch.tsc_catchup = 1; 4721 } 4722 4723 if (kvm_lapic_hv_timer_in_use(vcpu)) 4724 kvm_lapic_restart_hv_timer(vcpu); 4725 4726 /* 4727 * On a host with synchronized TSC, there is no need to update 4728 * kvmclock on vcpu->cpu migration 4729 */ 4730 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) 4731 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 4732 if (vcpu->cpu != cpu) 4733 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); 4734 vcpu->cpu = cpu; 4735 } 4736 4737 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 4738 } 4739 4740 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) 4741 { 4742 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 4743 struct kvm_steal_time __user *st; 4744 struct kvm_memslots *slots; 4745 static const u8 preempted = KVM_VCPU_PREEMPTED; 4746 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; 4747 4748 /* 4749 * The vCPU can be marked preempted if and only if the VM-Exit was on 4750 * an instruction boundary and will not trigger guest emulation of any 4751 * kind (see vcpu_run). Vendor specific code controls (conservatively) 4752 * when this is true, for example allowing the vCPU to be marked 4753 * preempted if and only if the VM-Exit was due to a host interrupt. 4754 */ 4755 if (!vcpu->arch.at_instruction_boundary) { 4756 vcpu->stat.preemption_other++; 4757 return; 4758 } 4759 4760 vcpu->stat.preemption_reported++; 4761 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 4762 return; 4763 4764 if (vcpu->arch.st.preempted) 4765 return; 4766 4767 /* This happens on process exit */ 4768 if (unlikely(current->mm != vcpu->kvm->mm)) 4769 return; 4770 4771 slots = kvm_memslots(vcpu->kvm); 4772 4773 if (unlikely(slots->generation != ghc->generation || 4774 gpa != ghc->gpa || 4775 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) 4776 return; 4777 4778 st = (struct kvm_steal_time __user *)ghc->hva; 4779 BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted)); 4780 4781 if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted))) 4782 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; 4783 4784 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 4785 } 4786 4787 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 4788 { 4789 int idx; 4790 4791 if (vcpu->preempted) { 4792 if (!vcpu->arch.guest_state_protected) 4793 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); 4794 4795 /* 4796 * Take the srcu lock as memslots will be accessed to check the gfn 4797 * cache generation against the memslots generation. 
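 * (Accessing kvm_memslots() requires the SRCU read lock or slots_lock;
 * the generation check in kvm_steal_time_set_preempted() above is such
 * an access, hence the srcu_read_lock()/srcu_read_unlock() pair below.)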
4798 */ 4799 idx = srcu_read_lock(&vcpu->kvm->srcu); 4800 if (kvm_xen_msr_enabled(vcpu->kvm)) 4801 kvm_xen_runstate_set_preempted(vcpu); 4802 else 4803 kvm_steal_time_set_preempted(vcpu); 4804 srcu_read_unlock(&vcpu->kvm->srcu, idx); 4805 } 4806 4807 static_call(kvm_x86_vcpu_put)(vcpu); 4808 vcpu->arch.last_host_tsc = rdtsc(); 4809 } 4810 4811 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 4812 struct kvm_lapic_state *s) 4813 { 4814 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 4815 4816 return kvm_apic_get_state(vcpu, s); 4817 } 4818 4819 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, 4820 struct kvm_lapic_state *s) 4821 { 4822 int r; 4823 4824 r = kvm_apic_set_state(vcpu, s); 4825 if (r) 4826 return r; 4827 update_cr8_intercept(vcpu); 4828 4829 return 0; 4830 } 4831 4832 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) 4833 { 4834 /* 4835 * We can accept userspace's request for interrupt injection 4836 * as long as we have a place to store the interrupt number. 4837 * The actual injection will happen when the CPU is able to 4838 * deliver the interrupt. 4839 */ 4840 if (kvm_cpu_has_extint(vcpu)) 4841 return false; 4842 4843 /* Acknowledging ExtINT does not happen if LINT0 is masked. */ 4844 return (!lapic_in_kernel(vcpu) || 4845 kvm_apic_accept_pic_intr(vcpu)); 4846 } 4847 4848 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) 4849 { 4850 /* 4851 * Do not cause an interrupt window exit if an exception 4852 * is pending or an event needs reinjection; userspace 4853 * might want to inject the interrupt manually using KVM_SET_REGS 4854 * or KVM_SET_SREGS. For that to work, we must be at an 4855 * instruction boundary and with no events half-injected. 4856 */ 4857 return (kvm_arch_interrupt_allowed(vcpu) && 4858 kvm_cpu_accept_dm_intr(vcpu) && 4859 !kvm_event_needs_reinjection(vcpu) && 4860 !kvm_is_exception_pending(vcpu)); 4861 } 4862 4863 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 4864 struct kvm_interrupt *irq) 4865 { 4866 if (irq->irq >= KVM_NR_INTERRUPTS) 4867 return -EINVAL; 4868 4869 if (!irqchip_in_kernel(vcpu->kvm)) { 4870 kvm_queue_interrupt(vcpu, irq->irq, false); 4871 kvm_make_request(KVM_REQ_EVENT, vcpu); 4872 return 0; 4873 } 4874 4875 /* 4876 * With in-kernel LAPIC, we only use this to inject EXTINT, so 4877 * fail for in-kernel 8259. 
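 *
 * For reference, the userspace side is a sketch like ("vcpu_fd" and
 * "vector" hypothetical):
 *
 *	struct kvm_interrupt irq = { .irq = vector };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 *
 * which is used either with a fully userspace interrupt controller
 * or, as here, to deliver EXTINT with a split irqchip.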
4878 */ 4879 if (pic_in_kernel(vcpu->kvm)) 4880 return -ENXIO; 4881 4882 if (vcpu->arch.pending_external_vector != -1) 4883 return -EEXIST; 4884 4885 vcpu->arch.pending_external_vector = irq->irq; 4886 kvm_make_request(KVM_REQ_EVENT, vcpu); 4887 return 0; 4888 } 4889 4890 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) 4891 { 4892 kvm_inject_nmi(vcpu); 4893 4894 return 0; 4895 } 4896 4897 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu) 4898 { 4899 kvm_make_request(KVM_REQ_SMI, vcpu); 4900 4901 return 0; 4902 } 4903 4904 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, 4905 struct kvm_tpr_access_ctl *tac) 4906 { 4907 if (tac->flags) 4908 return -EINVAL; 4909 vcpu->arch.tpr_access_reporting = !!tac->enabled; 4910 return 0; 4911 } 4912 4913 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, 4914 u64 mcg_cap) 4915 { 4916 int r; 4917 unsigned bank_num = mcg_cap & 0xff, bank; 4918 4919 r = -EINVAL; 4920 if (!bank_num || bank_num > KVM_MAX_MCE_BANKS) 4921 goto out; 4922 if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000)) 4923 goto out; 4924 r = 0; 4925 vcpu->arch.mcg_cap = mcg_cap; 4926 /* Init IA32_MCG_CTL to all 1s */ 4927 if (mcg_cap & MCG_CTL_P) 4928 vcpu->arch.mcg_ctl = ~(u64)0; 4929 /* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */ 4930 for (bank = 0; bank < bank_num; bank++) { 4931 vcpu->arch.mce_banks[bank*4] = ~(u64)0; 4932 if (mcg_cap & MCG_CMCI_P) 4933 vcpu->arch.mci_ctl2_banks[bank] = 0; 4934 } 4935 4936 kvm_apic_after_set_mcg_cap(vcpu); 4937 4938 static_call(kvm_x86_setup_mce)(vcpu); 4939 out: 4940 return r; 4941 } 4942 4943 /* 4944 * Validate this is an UCNA (uncorrectable no action) error by checking the 4945 * MCG_STATUS and MCi_STATUS registers: 4946 * - none of the bits for Machine Check Exceptions are set 4947 * - both the VAL (valid) and UC (uncorrectable) bits are set 4948 * MCI_STATUS_PCC - Processor Context Corrupted 4949 * MCI_STATUS_S - Signaled as a Machine Check Exception 4950 * MCI_STATUS_AR - Software recoverable Action Required 4951 */ 4952 static bool is_ucna(struct kvm_x86_mce *mce) 4953 { 4954 return !mce->mcg_status && 4955 !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) && 4956 (mce->status & MCI_STATUS_VAL) && 4957 (mce->status & MCI_STATUS_UC); 4958 } 4959 4960 static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks) 4961 { 4962 u64 mcg_cap = vcpu->arch.mcg_cap; 4963 4964 banks[1] = mce->status; 4965 banks[2] = mce->addr; 4966 banks[3] = mce->misc; 4967 vcpu->arch.mcg_status = mce->mcg_status; 4968 4969 if (!(mcg_cap & MCG_CMCI_P) || 4970 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN)) 4971 return 0; 4972 4973 if (lapic_in_kernel(vcpu)) 4974 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI); 4975 4976 return 0; 4977 } 4978 4979 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, 4980 struct kvm_x86_mce *mce) 4981 { 4982 u64 mcg_cap = vcpu->arch.mcg_cap; 4983 unsigned bank_num = mcg_cap & 0xff; 4984 u64 *banks = vcpu->arch.mce_banks; 4985 4986 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) 4987 return -EINVAL; 4988 4989 banks += array_index_nospec(4 * mce->bank, 4 * bank_num); 4990 4991 if (is_ucna(mce)) 4992 return kvm_vcpu_x86_set_ucna(vcpu, mce, banks); 4993 4994 /* 4995 * if IA32_MCG_CTL is not all 1s, the uncorrected error 4996 * reporting is disabled 4997 */ 4998 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && 4999 vcpu->arch.mcg_ctl != ~(u64)0) 5000 return 0; 5001 /* 5002 * if IA32_MCi_CTL 
is not all 1s, the uncorrected error 5003 * reporting is disabled for the bank 5004 */ 5005 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) 5006 return 0; 5007 if (mce->status & MCI_STATUS_UC) { 5008 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || 5009 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { 5010 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5011 return 0; 5012 } 5013 if (banks[1] & MCI_STATUS_VAL) 5014 mce->status |= MCI_STATUS_OVER; 5015 banks[2] = mce->addr; 5016 banks[3] = mce->misc; 5017 vcpu->arch.mcg_status = mce->mcg_status; 5018 banks[1] = mce->status; 5019 kvm_queue_exception(vcpu, MC_VECTOR); 5020 } else if (!(banks[1] & MCI_STATUS_VAL) 5021 || !(banks[1] & MCI_STATUS_UC)) { 5022 if (banks[1] & MCI_STATUS_VAL) 5023 mce->status |= MCI_STATUS_OVER; 5024 banks[2] = mce->addr; 5025 banks[3] = mce->misc; 5026 banks[1] = mce->status; 5027 } else 5028 banks[1] |= MCI_STATUS_OVER; 5029 return 0; 5030 } 5031 5032 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, 5033 struct kvm_vcpu_events *events) 5034 { 5035 struct kvm_queued_exception *ex; 5036 5037 process_nmi(vcpu); 5038 5039 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 5040 process_smi(vcpu); 5041 5042 /* 5043 * KVM's ABI only allows for one exception to be migrated. Luckily, 5044 * the only time there can be two queued exceptions is if there's a 5045 * non-exiting _injected_ exception, and a pending exiting exception. 5046 * In that case, ignore the VM-Exiting exception as it's an extension 5047 * of the injected exception. 5048 */ 5049 if (vcpu->arch.exception_vmexit.pending && 5050 !vcpu->arch.exception.pending && 5051 !vcpu->arch.exception.injected) 5052 ex = &vcpu->arch.exception_vmexit; 5053 else 5054 ex = &vcpu->arch.exception; 5055 5056 /* 5057 * In guest mode, payload delivery should be deferred if the exception 5058 * will be intercepted by L1, e.g. KVM should not modify CR2 if L1 5059 * intercepts #PF, ditto for DR6 and #DBs. If the per-VM capability, 5060 * KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not 5061 * propagate the payload and so it cannot be safely deferred. Deliver 5062 * the payload if the capability hasn't been requested. 5063 */ 5064 if (!vcpu->kvm->arch.exception_payload_enabled && 5065 ex->pending && ex->has_payload) 5066 kvm_deliver_exception_payload(vcpu, ex); 5067 5068 /* 5069 * The API doesn't provide the instruction length for software 5070 * exceptions, so don't report them. As long as the guest RIP 5071 * isn't advanced, we should expect to encounter the exception 5072 * again. 5073 */ 5074 if (kvm_exception_is_soft(ex->vector)) { 5075 events->exception.injected = 0; 5076 events->exception.pending = 0; 5077 } else { 5078 events->exception.injected = ex->injected; 5079 events->exception.pending = ex->pending; 5080 /* 5081 * For ABI compatibility, deliberately conflate 5082 * pending and injected exceptions when 5083 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
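 * A pending-but-not-yet-injected exception is then reported with
 * injected=1 as well (see the |= below), matching the pre-payload
 * one-flag ABI.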
5084 */ 5085 if (!vcpu->kvm->arch.exception_payload_enabled) 5086 events->exception.injected |= ex->pending; 5087 } 5088 events->exception.nr = ex->vector; 5089 events->exception.has_error_code = ex->has_error_code; 5090 events->exception.error_code = ex->error_code; 5091 events->exception_has_payload = ex->has_payload; 5092 events->exception_payload = ex->payload; 5093 5094 events->interrupt.injected = 5095 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; 5096 events->interrupt.nr = vcpu->arch.interrupt.nr; 5097 events->interrupt.soft = 0; 5098 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 5099 5100 events->nmi.injected = vcpu->arch.nmi_injected; 5101 events->nmi.pending = vcpu->arch.nmi_pending != 0; 5102 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); 5103 events->nmi.pad = 0; 5104 5105 events->sipi_vector = 0; /* never valid when reporting to user space */ 5106 5107 events->smi.smm = is_smm(vcpu); 5108 events->smi.pending = vcpu->arch.smi_pending; 5109 events->smi.smm_inside_nmi = 5110 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); 5111 events->smi.latched_init = kvm_lapic_latched_init(vcpu); 5112 5113 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 5114 | KVM_VCPUEVENT_VALID_SHADOW 5115 | KVM_VCPUEVENT_VALID_SMM); 5116 if (vcpu->kvm->arch.exception_payload_enabled) 5117 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; 5118 if (vcpu->kvm->arch.triple_fault_event) { 5119 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5120 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; 5121 } 5122 5123 memset(&events->reserved, 0, sizeof(events->reserved)); 5124 } 5125 5126 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm); 5127 5128 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 5129 struct kvm_vcpu_events *events) 5130 { 5131 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING 5132 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 5133 | KVM_VCPUEVENT_VALID_SHADOW 5134 | KVM_VCPUEVENT_VALID_SMM 5135 | KVM_VCPUEVENT_VALID_PAYLOAD 5136 | KVM_VCPUEVENT_VALID_TRIPLE_FAULT)) 5137 return -EINVAL; 5138 5139 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { 5140 if (!vcpu->kvm->arch.exception_payload_enabled) 5141 return -EINVAL; 5142 if (events->exception.pending) 5143 events->exception.injected = 0; 5144 else 5145 events->exception_has_payload = 0; 5146 } else { 5147 events->exception.pending = 0; 5148 events->exception_has_payload = 0; 5149 } 5150 5151 if ((events->exception.injected || events->exception.pending) && 5152 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) 5153 return -EINVAL; 5154 5155 /* INITs are latched while in SMM */ 5156 if (events->flags & KVM_VCPUEVENT_VALID_SMM && 5157 (events->smi.smm || events->smi.pending) && 5158 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 5159 return -EINVAL; 5160 5161 process_nmi(vcpu); 5162 5163 /* 5164 * Flag that userspace is stuffing an exception; the next KVM_RUN will 5165 * morph the exception to a VM-Exit if appropriate. Do this only for 5166 * pending exceptions; already-injected exceptions are not subject to 5167 * interception. Note, userspace that conflates pending and injected 5168 * is hosed, and will incorrectly convert an injected exception into a 5169 * pending exception, which in turn may cause a spurious VM-Exit.
5170 */ 5171 vcpu->arch.exception_from_userspace = events->exception.pending; 5172 5173 vcpu->arch.exception_vmexit.pending = false; 5174 5175 vcpu->arch.exception.injected = events->exception.injected; 5176 vcpu->arch.exception.pending = events->exception.pending; 5177 vcpu->arch.exception.vector = events->exception.nr; 5178 vcpu->arch.exception.has_error_code = events->exception.has_error_code; 5179 vcpu->arch.exception.error_code = events->exception.error_code; 5180 vcpu->arch.exception.has_payload = events->exception_has_payload; 5181 vcpu->arch.exception.payload = events->exception_payload; 5182 5183 vcpu->arch.interrupt.injected = events->interrupt.injected; 5184 vcpu->arch.interrupt.nr = events->interrupt.nr; 5185 vcpu->arch.interrupt.soft = events->interrupt.soft; 5186 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) 5187 static_call(kvm_x86_set_interrupt_shadow)(vcpu, 5188 events->interrupt.shadow); 5189 5190 vcpu->arch.nmi_injected = events->nmi.injected; 5191 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) 5192 vcpu->arch.nmi_pending = events->nmi.pending; 5193 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); 5194 5195 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && 5196 lapic_in_kernel(vcpu)) 5197 vcpu->arch.apic->sipi_vector = events->sipi_vector; 5198 5199 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { 5200 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { 5201 kvm_x86_ops.nested_ops->leave_nested(vcpu); 5202 kvm_smm_changed(vcpu, events->smi.smm); 5203 } 5204 5205 vcpu->arch.smi_pending = events->smi.pending; 5206 5207 if (events->smi.smm) { 5208 if (events->smi.smm_inside_nmi) 5209 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 5210 else 5211 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; 5212 } 5213 5214 if (lapic_in_kernel(vcpu)) { 5215 if (events->smi.latched_init) 5216 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5217 else 5218 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5219 } 5220 } 5221 5222 if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) { 5223 if (!vcpu->kvm->arch.triple_fault_event) 5224 return -EINVAL; 5225 if (events->triple_fault.pending) 5226 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5227 else 5228 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5229 } 5230 5231 kvm_make_request(KVM_REQ_EVENT, vcpu); 5232 5233 return 0; 5234 } 5235 5236 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, 5237 struct kvm_debugregs *dbgregs) 5238 { 5239 unsigned long val; 5240 5241 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); 5242 kvm_get_dr(vcpu, 6, &val); 5243 dbgregs->dr6 = val; 5244 dbgregs->dr7 = vcpu->arch.dr7; 5245 dbgregs->flags = 0; 5246 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); 5247 } 5248 5249 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 5250 struct kvm_debugregs *dbgregs) 5251 { 5252 if (dbgregs->flags) 5253 return -EINVAL; 5254 5255 if (!kvm_dr6_valid(dbgregs->dr6)) 5256 return -EINVAL; 5257 if (!kvm_dr7_valid(dbgregs->dr7)) 5258 return -EINVAL; 5259 5260 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); 5261 kvm_update_dr0123(vcpu); 5262 vcpu->arch.dr6 = dbgregs->dr6; 5263 vcpu->arch.dr7 = dbgregs->dr7; 5264 kvm_update_dr7(vcpu); 5265 5266 return 0; 5267 } 5268 5269 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, 5270 struct kvm_xsave *guest_xsave) 5271 { 5272 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5273 return; 5274 5275 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5276 
guest_xsave->region, 5277 sizeof(guest_xsave->region), 5278 vcpu->arch.pkru); 5279 } 5280 5281 static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu, 5282 u8 *state, unsigned int size) 5283 { 5284 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5285 return; 5286 5287 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5288 state, size, vcpu->arch.pkru); 5289 } 5290 5291 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 5292 struct kvm_xsave *guest_xsave) 5293 { 5294 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5295 return 0; 5296 5297 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, 5298 guest_xsave->region, 5299 kvm_caps.supported_xcr0, 5300 &vcpu->arch.pkru); 5301 } 5302 5303 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, 5304 struct kvm_xcrs *guest_xcrs) 5305 { 5306 if (!boot_cpu_has(X86_FEATURE_XSAVE)) { 5307 guest_xcrs->nr_xcrs = 0; 5308 return; 5309 } 5310 5311 guest_xcrs->nr_xcrs = 1; 5312 guest_xcrs->flags = 0; 5313 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; 5314 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; 5315 } 5316 5317 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, 5318 struct kvm_xcrs *guest_xcrs) 5319 { 5320 int i, r = 0; 5321 5322 if (!boot_cpu_has(X86_FEATURE_XSAVE)) 5323 return -EINVAL; 5324 5325 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) 5326 return -EINVAL; 5327 5328 for (i = 0; i < guest_xcrs->nr_xcrs; i++) 5329 /* Only support XCR0 currently */ 5330 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { 5331 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, 5332 guest_xcrs->xcrs[i].value); 5333 break; 5334 } 5335 if (r) 5336 r = -EINVAL; 5337 return r; 5338 } 5339 5340 /* 5341 * kvm_set_guest_paused() indicates to the guest kernel that it has been 5342 * stopped by the hypervisor. This function will be called from the host only. 5343 * EINVAL is returned when the host attempts to set the flag for a guest that 5344 * does not support pv clocks. 
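 *
 * The host reaches this via the KVM_KVMCLOCK_CTRL vcpu ioctl, which
 * carries no payload; a minimal caller-side sketch ("vcpu_fd"
 * hypothetical):
 *
 *	ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0);
 *
 * The guest then sees PVCLOCK_GUEST_STOPPED in its pvclock flags on
 * the next clock update and can e.g. suppress soft-lockup reports.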
5345 */ 5346 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 5347 { 5348 if (!vcpu->arch.pv_time.active) 5349 return -EINVAL; 5350 vcpu->arch.pvclock_set_guest_stopped_request = true; 5351 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 5352 return 0; 5353 } 5354 5355 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu, 5356 struct kvm_device_attr *attr) 5357 { 5358 int r; 5359 5360 switch (attr->attr) { 5361 case KVM_VCPU_TSC_OFFSET: 5362 r = 0; 5363 break; 5364 default: 5365 r = -ENXIO; 5366 } 5367 5368 return r; 5369 } 5370 5371 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu, 5372 struct kvm_device_attr *attr) 5373 { 5374 u64 __user *uaddr = kvm_get_attr_addr(attr); 5375 int r; 5376 5377 if (IS_ERR(uaddr)) 5378 return PTR_ERR(uaddr); 5379 5380 switch (attr->attr) { 5381 case KVM_VCPU_TSC_OFFSET: 5382 r = -EFAULT; 5383 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) 5384 break; 5385 r = 0; 5386 break; 5387 default: 5388 r = -ENXIO; 5389 } 5390 5391 return r; 5392 } 5393 5394 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, 5395 struct kvm_device_attr *attr) 5396 { 5397 u64 __user *uaddr = kvm_get_attr_addr(attr); 5398 struct kvm *kvm = vcpu->kvm; 5399 int r; 5400 5401 if (IS_ERR(uaddr)) 5402 return PTR_ERR(uaddr); 5403 5404 switch (attr->attr) { 5405 case KVM_VCPU_TSC_OFFSET: { 5406 u64 offset, tsc, ns; 5407 unsigned long flags; 5408 bool matched; 5409 5410 r = -EFAULT; 5411 if (get_user(offset, uaddr)) 5412 break; 5413 5414 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 5415 5416 matched = (vcpu->arch.virtual_tsc_khz && 5417 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && 5418 kvm->arch.last_tsc_offset == offset); 5419 5420 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; 5421 ns = get_kvmclock_base_ns(); 5422 5423 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched); 5424 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 5425 5426 r = 0; 5427 break; 5428 } 5429 default: 5430 r = -ENXIO; 5431 } 5432 5433 return r; 5434 } 5435 5436 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu, 5437 unsigned int ioctl, 5438 void __user *argp) 5439 { 5440 struct kvm_device_attr attr; 5441 int r; 5442 5443 if (copy_from_user(&attr, argp, sizeof(attr))) 5444 return -EFAULT; 5445 5446 if (attr.group != KVM_VCPU_TSC_CTRL) 5447 return -ENXIO; 5448 5449 switch (ioctl) { 5450 case KVM_HAS_DEVICE_ATTR: 5451 r = kvm_arch_tsc_has_attr(vcpu, &attr); 5452 break; 5453 case KVM_GET_DEVICE_ATTR: 5454 r = kvm_arch_tsc_get_attr(vcpu, &attr); 5455 break; 5456 case KVM_SET_DEVICE_ATTR: 5457 r = kvm_arch_tsc_set_attr(vcpu, &attr); 5458 break; 5459 } 5460 5461 return r; 5462 } 5463 5464 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 5465 struct kvm_enable_cap *cap) 5466 { 5467 int r; 5468 uint16_t vmcs_version; 5469 void __user *user_ptr; 5470 5471 if (cap->flags) 5472 return -EINVAL; 5473 5474 switch (cap->cap) { 5475 case KVM_CAP_HYPERV_SYNIC2: 5476 if (cap->args[0]) 5477 return -EINVAL; 5478 fallthrough; 5479 5480 case KVM_CAP_HYPERV_SYNIC: 5481 if (!irqchip_in_kernel(vcpu->kvm)) 5482 return -EINVAL; 5483 return kvm_hv_activate_synic(vcpu, cap->cap == 5484 KVM_CAP_HYPERV_SYNIC2); 5485 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 5486 if (!kvm_x86_ops.nested_ops->enable_evmcs) 5487 return -ENOTTY; 5488 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); 5489 if (!r) { 5490 user_ptr = (void __user *)(uintptr_t)cap->args[0]; 5491 if (copy_to_user(user_ptr, &vmcs_version, 5492 sizeof(vmcs_version))) 5493 r = -EFAULT; 5494 
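/*
 * cap->args[0] above is a userspace pointer to a 16-bit field that
 * receives the supported eVMCS version; a caller-side sketch
 * ("vcpu_fd" hypothetical):
 *
 *	uint16_t evmcs_ver;
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
 *		.args[0] = (uintptr_t)&evmcs_ver,
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */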
} 5495 return r; 5496 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 5497 if (!kvm_x86_ops.enable_direct_tlbflush) 5498 return -ENOTTY; 5499 5500 return static_call(kvm_x86_enable_direct_tlbflush)(vcpu); 5501 5502 case KVM_CAP_HYPERV_ENFORCE_CPUID: 5503 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); 5504 5505 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 5506 vcpu->arch.pv_cpuid.enforce = cap->args[0]; 5507 if (vcpu->arch.pv_cpuid.enforce) 5508 kvm_update_pv_runtime(vcpu); 5509 5510 return 0; 5511 default: 5512 return -EINVAL; 5513 } 5514 } 5515 5516 long kvm_arch_vcpu_ioctl(struct file *filp, 5517 unsigned int ioctl, unsigned long arg) 5518 { 5519 struct kvm_vcpu *vcpu = filp->private_data; 5520 void __user *argp = (void __user *)arg; 5521 int r; 5522 union { 5523 struct kvm_sregs2 *sregs2; 5524 struct kvm_lapic_state *lapic; 5525 struct kvm_xsave *xsave; 5526 struct kvm_xcrs *xcrs; 5527 void *buffer; 5528 } u; 5529 5530 vcpu_load(vcpu); 5531 5532 u.buffer = NULL; 5533 switch (ioctl) { 5534 case KVM_GET_LAPIC: { 5535 r = -EINVAL; 5536 if (!lapic_in_kernel(vcpu)) 5537 goto out; 5538 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), 5539 GFP_KERNEL_ACCOUNT); 5540 5541 r = -ENOMEM; 5542 if (!u.lapic) 5543 goto out; 5544 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); 5545 if (r) 5546 goto out; 5547 r = -EFAULT; 5548 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) 5549 goto out; 5550 r = 0; 5551 break; 5552 } 5553 case KVM_SET_LAPIC: { 5554 r = -EINVAL; 5555 if (!lapic_in_kernel(vcpu)) 5556 goto out; 5557 u.lapic = memdup_user(argp, sizeof(*u.lapic)); 5558 if (IS_ERR(u.lapic)) { 5559 r = PTR_ERR(u.lapic); 5560 goto out_nofree; 5561 } 5562 5563 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); 5564 break; 5565 } 5566 case KVM_INTERRUPT: { 5567 struct kvm_interrupt irq; 5568 5569 r = -EFAULT; 5570 if (copy_from_user(&irq, argp, sizeof(irq))) 5571 goto out; 5572 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 5573 break; 5574 } 5575 case KVM_NMI: { 5576 r = kvm_vcpu_ioctl_nmi(vcpu); 5577 break; 5578 } 5579 case KVM_SMI: { 5580 r = kvm_vcpu_ioctl_smi(vcpu); 5581 break; 5582 } 5583 case KVM_SET_CPUID: { 5584 struct kvm_cpuid __user *cpuid_arg = argp; 5585 struct kvm_cpuid cpuid; 5586 5587 r = -EFAULT; 5588 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5589 goto out; 5590 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); 5591 break; 5592 } 5593 case KVM_SET_CPUID2: { 5594 struct kvm_cpuid2 __user *cpuid_arg = argp; 5595 struct kvm_cpuid2 cpuid; 5596 5597 r = -EFAULT; 5598 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5599 goto out; 5600 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 5601 cpuid_arg->entries); 5602 break; 5603 } 5604 case KVM_GET_CPUID2: { 5605 struct kvm_cpuid2 __user *cpuid_arg = argp; 5606 struct kvm_cpuid2 cpuid; 5607 5608 r = -EFAULT; 5609 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5610 goto out; 5611 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 5612 cpuid_arg->entries); 5613 if (r) 5614 goto out; 5615 r = -EFAULT; 5616 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 5617 goto out; 5618 r = 0; 5619 break; 5620 } 5621 case KVM_GET_MSRS: { 5622 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5623 r = msr_io(vcpu, argp, do_get_msr, 1); 5624 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5625 break; 5626 } 5627 case KVM_SET_MSRS: { 5628 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5629 r = msr_io(vcpu, argp, do_set_msr, 0); 5630 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5631 break; 5632 } 5633 case KVM_TPR_ACCESS_REPORTING: { 5634 struct kvm_tpr_access_ctl tac; 
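/*
 * Userspace toggles TPR access reporting with, e.g. ("vcpu_fd"
 * hypothetical):
 *
 *	struct kvm_tpr_access_ctl tac = { .enabled = 1 };
 *	ioctl(vcpu_fd, KVM_TPR_ACCESS_REPORTING, &tac);
 *
 * after which guest TPR accesses are reported via KVM_EXIT_TPR_ACCESS.
 */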
5635 5636 r = -EFAULT; 5637 if (copy_from_user(&tac, argp, sizeof(tac))) 5638 goto out; 5639 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 5640 if (r) 5641 goto out; 5642 r = -EFAULT; 5643 if (copy_to_user(argp, &tac, sizeof(tac))) 5644 goto out; 5645 r = 0; 5646 break; 5647 }; 5648 case KVM_SET_VAPIC_ADDR: { 5649 struct kvm_vapic_addr va; 5650 int idx; 5651 5652 r = -EINVAL; 5653 if (!lapic_in_kernel(vcpu)) 5654 goto out; 5655 r = -EFAULT; 5656 if (copy_from_user(&va, argp, sizeof(va))) 5657 goto out; 5658 idx = srcu_read_lock(&vcpu->kvm->srcu); 5659 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 5660 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5661 break; 5662 } 5663 case KVM_X86_SETUP_MCE: { 5664 u64 mcg_cap; 5665 5666 r = -EFAULT; 5667 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap))) 5668 goto out; 5669 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 5670 break; 5671 } 5672 case KVM_X86_SET_MCE: { 5673 struct kvm_x86_mce mce; 5674 5675 r = -EFAULT; 5676 if (copy_from_user(&mce, argp, sizeof(mce))) 5677 goto out; 5678 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 5679 break; 5680 } 5681 case KVM_GET_VCPU_EVENTS: { 5682 struct kvm_vcpu_events events; 5683 5684 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); 5685 5686 r = -EFAULT; 5687 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) 5688 break; 5689 r = 0; 5690 break; 5691 } 5692 case KVM_SET_VCPU_EVENTS: { 5693 struct kvm_vcpu_events events; 5694 5695 r = -EFAULT; 5696 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) 5697 break; 5698 5699 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); 5700 break; 5701 } 5702 case KVM_GET_DEBUGREGS: { 5703 struct kvm_debugregs dbgregs; 5704 5705 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); 5706 5707 r = -EFAULT; 5708 if (copy_to_user(argp, &dbgregs, 5709 sizeof(struct kvm_debugregs))) 5710 break; 5711 r = 0; 5712 break; 5713 } 5714 case KVM_SET_DEBUGREGS: { 5715 struct kvm_debugregs dbgregs; 5716 5717 r = -EFAULT; 5718 if (copy_from_user(&dbgregs, argp, 5719 sizeof(struct kvm_debugregs))) 5720 break; 5721 5722 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); 5723 break; 5724 } 5725 case KVM_GET_XSAVE: { 5726 r = -EINVAL; 5727 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) 5728 break; 5729 5730 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT); 5731 r = -ENOMEM; 5732 if (!u.xsave) 5733 break; 5734 5735 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); 5736 5737 r = -EFAULT; 5738 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) 5739 break; 5740 r = 0; 5741 break; 5742 } 5743 case KVM_SET_XSAVE: { 5744 int size = vcpu->arch.guest_fpu.uabi_size; 5745 5746 u.xsave = memdup_user(argp, size); 5747 if (IS_ERR(u.xsave)) { 5748 r = PTR_ERR(u.xsave); 5749 goto out_nofree; 5750 } 5751 5752 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); 5753 break; 5754 } 5755 5756 case KVM_GET_XSAVE2: { 5757 int size = vcpu->arch.guest_fpu.uabi_size; 5758 5759 u.xsave = kzalloc(size, GFP_KERNEL_ACCOUNT); 5760 r = -ENOMEM; 5761 if (!u.xsave) 5762 break; 5763 5764 kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size); 5765 5766 r = -EFAULT; 5767 if (copy_to_user(argp, u.xsave, size)) 5768 break; 5769 5770 r = 0; 5771 break; 5772 } 5773 5774 case KVM_GET_XCRS: { 5775 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT); 5776 r = -ENOMEM; 5777 if (!u.xcrs) 5778 break; 5779 5780 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); 5781 5782 r = -EFAULT; 5783 if (copy_to_user(argp, u.xcrs, 5784 sizeof(struct kvm_xcrs))) 5785 break; 5786 r = 0; 
5787 break; 5788 } 5789 case KVM_SET_XCRS: { 5790 u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); 5791 if (IS_ERR(u.xcrs)) { 5792 r = PTR_ERR(u.xcrs); 5793 goto out_nofree; 5794 } 5795 5796 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); 5797 break; 5798 } 5799 case KVM_SET_TSC_KHZ: { 5800 u32 user_tsc_khz; 5801 5802 r = -EINVAL; 5803 user_tsc_khz = (u32)arg; 5804 5805 if (kvm_caps.has_tsc_control && 5806 user_tsc_khz >= kvm_caps.max_guest_tsc_khz) 5807 goto out; 5808 5809 if (user_tsc_khz == 0) 5810 user_tsc_khz = tsc_khz; 5811 5812 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) 5813 r = 0; 5814 5815 goto out; 5816 } 5817 case KVM_GET_TSC_KHZ: { 5818 r = vcpu->arch.virtual_tsc_khz; 5819 goto out; 5820 } 5821 case KVM_KVMCLOCK_CTRL: { 5822 r = kvm_set_guest_paused(vcpu); 5823 goto out; 5824 } 5825 case KVM_ENABLE_CAP: { 5826 struct kvm_enable_cap cap; 5827 5828 r = -EFAULT; 5829 if (copy_from_user(&cap, argp, sizeof(cap))) 5830 goto out; 5831 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 5832 break; 5833 } 5834 case KVM_GET_NESTED_STATE: { 5835 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5836 u32 user_data_size; 5837 5838 r = -EINVAL; 5839 if (!kvm_x86_ops.nested_ops->get_state) 5840 break; 5841 5842 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); 5843 r = -EFAULT; 5844 if (get_user(user_data_size, &user_kvm_nested_state->size)) 5845 break; 5846 5847 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, 5848 user_data_size); 5849 if (r < 0) 5850 break; 5851 5852 if (r > user_data_size) { 5853 if (put_user(r, &user_kvm_nested_state->size)) 5854 r = -EFAULT; 5855 else 5856 r = -E2BIG; 5857 break; 5858 } 5859 5860 r = 0; 5861 break; 5862 } 5863 case KVM_SET_NESTED_STATE: { 5864 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5865 struct kvm_nested_state kvm_state; 5866 int idx; 5867 5868 r = -EINVAL; 5869 if (!kvm_x86_ops.nested_ops->set_state) 5870 break; 5871 5872 r = -EFAULT; 5873 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) 5874 break; 5875 5876 r = -EINVAL; 5877 if (kvm_state.size < sizeof(kvm_state)) 5878 break; 5879 5880 if (kvm_state.flags & 5881 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE 5882 | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING 5883 | KVM_STATE_NESTED_GIF_SET)) 5884 break; 5885 5886 /* nested_run_pending implies guest_mode. 
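 As an illustration, a well-formed save/restore round trip from userspace
 looks roughly like the following hedged sketch (not code from this file;
 'size' is assumed to come from KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE),
 error handling omitted):

	struct kvm_nested_state *state = calloc(1, size);

	state->size = size;
	ioctl(src_vcpu_fd, KVM_GET_NESTED_STATE, state);
	... transfer the buffer to the destination ...
	ioctl(dst_vcpu_fd, KVM_SET_NESTED_STATE, state);

 A state blob carrying KVM_STATE_NESTED_RUN_PENDING without
 KVM_STATE_NESTED_GUEST_MODE is rejected just below.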
*/ 5887 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING) 5888 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) 5889 break; 5890 5891 idx = srcu_read_lock(&vcpu->kvm->srcu); 5892 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); 5893 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5894 break; 5895 } 5896 case KVM_GET_SUPPORTED_HV_CPUID: 5897 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); 5898 break; 5899 #ifdef CONFIG_KVM_XEN 5900 case KVM_XEN_VCPU_GET_ATTR: { 5901 struct kvm_xen_vcpu_attr xva; 5902 5903 r = -EFAULT; 5904 if (copy_from_user(&xva, argp, sizeof(xva))) 5905 goto out; 5906 r = kvm_xen_vcpu_get_attr(vcpu, &xva); 5907 if (!r && copy_to_user(argp, &xva, sizeof(xva))) 5908 r = -EFAULT; 5909 break; 5910 } 5911 case KVM_XEN_VCPU_SET_ATTR: { 5912 struct kvm_xen_vcpu_attr xva; 5913 5914 r = -EFAULT; 5915 if (copy_from_user(&xva, argp, sizeof(xva))) 5916 goto out; 5917 r = kvm_xen_vcpu_set_attr(vcpu, &xva); 5918 break; 5919 } 5920 #endif 5921 case KVM_GET_SREGS2: { 5922 u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL); 5923 r = -ENOMEM; 5924 if (!u.sregs2) 5925 goto out; 5926 __get_sregs2(vcpu, u.sregs2); 5927 r = -EFAULT; 5928 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2))) 5929 goto out; 5930 r = 0; 5931 break; 5932 } 5933 case KVM_SET_SREGS2: { 5934 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2)); 5935 if (IS_ERR(u.sregs2)) { 5936 r = PTR_ERR(u.sregs2); 5937 u.sregs2 = NULL; 5938 goto out; 5939 } 5940 r = __set_sregs2(vcpu, u.sregs2); 5941 break; 5942 } 5943 case KVM_HAS_DEVICE_ATTR: 5944 case KVM_GET_DEVICE_ATTR: 5945 case KVM_SET_DEVICE_ATTR: 5946 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp); 5947 break; 5948 default: 5949 r = -EINVAL; 5950 } 5951 out: 5952 kfree(u.buffer); 5953 out_nofree: 5954 vcpu_put(vcpu); 5955 return r; 5956 } 5957 5958 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 5959 { 5960 return VM_FAULT_SIGBUS; 5961 } 5962 5963 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) 5964 { 5965 int ret; 5966 5967 if (addr > (unsigned int)(-3 * PAGE_SIZE)) 5968 return -EINVAL; 5969 ret = static_call(kvm_x86_set_tss_addr)(kvm, addr); 5970 return ret; 5971 } 5972 5973 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, 5974 u64 ident_addr) 5975 { 5976 return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr); 5977 } 5978 5979 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, 5980 unsigned long kvm_nr_mmu_pages) 5981 { 5982 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) 5983 return -EINVAL; 5984 5985 mutex_lock(&kvm->slots_lock); 5986 5987 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); 5988 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; 5989 5990 mutex_unlock(&kvm->slots_lock); 5991 return 0; 5992 } 5993 5994 static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) 5995 { 5996 return kvm->arch.n_max_mmu_pages; 5997 } 5998 5999 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 6000 { 6001 struct kvm_pic *pic = kvm->arch.vpic; 6002 int r; 6003 6004 r = 0; 6005 switch (chip->chip_id) { 6006 case KVM_IRQCHIP_PIC_MASTER: 6007 memcpy(&chip->chip.pic, &pic->pics[0], 6008 sizeof(struct kvm_pic_state)); 6009 break; 6010 case KVM_IRQCHIP_PIC_SLAVE: 6011 memcpy(&chip->chip.pic, &pic->pics[1], 6012 sizeof(struct kvm_pic_state)); 6013 break; 6014 case KVM_IRQCHIP_IOAPIC: 6015 kvm_get_ioapic(kvm, &chip->chip.ioapic); 6016 break; 6017 default: 6018 r = -EINVAL; 6019 break; 6020 } 6021 return r; 6022 
} 6023 6024 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 6025 { 6026 struct kvm_pic *pic = kvm->arch.vpic; 6027 int r; 6028 6029 r = 0; 6030 switch (chip->chip_id) { 6031 case KVM_IRQCHIP_PIC_MASTER: 6032 spin_lock(&pic->lock); 6033 memcpy(&pic->pics[0], &chip->chip.pic, 6034 sizeof(struct kvm_pic_state)); 6035 spin_unlock(&pic->lock); 6036 break; 6037 case KVM_IRQCHIP_PIC_SLAVE: 6038 spin_lock(&pic->lock); 6039 memcpy(&pic->pics[1], &chip->chip.pic, 6040 sizeof(struct kvm_pic_state)); 6041 spin_unlock(&pic->lock); 6042 break; 6043 case KVM_IRQCHIP_IOAPIC: 6044 kvm_set_ioapic(kvm, &chip->chip.ioapic); 6045 break; 6046 default: 6047 r = -EINVAL; 6048 break; 6049 } 6050 kvm_pic_update_irq(pic); 6051 return r; 6052 } 6053 6054 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) 6055 { 6056 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; 6057 6058 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); 6059 6060 mutex_lock(&kps->lock); 6061 memcpy(ps, &kps->channels, sizeof(*ps)); 6062 mutex_unlock(&kps->lock); 6063 return 0; 6064 } 6065 6066 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 6067 { 6068 int i; 6069 struct kvm_pit *pit = kvm->arch.vpit; 6070 6071 mutex_lock(&pit->pit_state.lock); 6072 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); 6073 for (i = 0; i < 3; i++) 6074 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); 6075 mutex_unlock(&pit->pit_state.lock); 6076 return 0; 6077 } 6078 6079 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 6080 { 6081 mutex_lock(&kvm->arch.vpit->pit_state.lock); 6082 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, 6083 sizeof(ps->channels)); 6084 ps->flags = kvm->arch.vpit->pit_state.flags; 6085 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 6086 memset(&ps->reserved, 0, sizeof(ps->reserved)); 6087 return 0; 6088 } 6089 6090 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 6091 { 6092 int start = 0; 6093 int i; 6094 u32 prev_legacy, cur_legacy; 6095 struct kvm_pit *pit = kvm->arch.vpit; 6096 6097 mutex_lock(&pit->pit_state.lock); 6098 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 6099 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; 6100 if (!prev_legacy && cur_legacy) 6101 start = 1; 6102 memcpy(&pit->pit_state.channels, &ps->channels, 6103 sizeof(pit->pit_state.channels)); 6104 pit->pit_state.flags = ps->flags; 6105 for (i = 0; i < 3; i++) 6106 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, 6107 start && i == 0); 6108 mutex_unlock(&pit->pit_state.lock); 6109 return 0; 6110 } 6111 6112 static int kvm_vm_ioctl_reinject(struct kvm *kvm, 6113 struct kvm_reinject_control *control) 6114 { 6115 struct kvm_pit *pit = kvm->arch.vpit; 6116 6117 /* pit->pit_state.lock was overloaded to prevent userspace from getting 6118 * an inconsistent state after running multiple KVM_REINJECT_CONTROL 6119 * ioctls in parallel. Use a separate lock if that ioctl isn't rare. 6120 */ 6121 mutex_lock(&pit->pit_state.lock); 6122 kvm_pit_set_reinject(pit, control->pit_reinject); 6123 mutex_unlock(&pit->pit_state.lock); 6124 6125 return 0; 6126 } 6127 6128 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) 6129 { 6130 6131 /* 6132 * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called 6133 * before reporting dirty_bitmap to userspace. KVM flushes the buffers 6134 * on all VM-Exits, thus we only need to kick running vCPUs to force a 6135 * VM-Exit. 
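 For reference, the usual trigger for this path is the KVM_GET_DIRTY_LOG
 ioctl; a minimal userspace sketch (slot id and bitmap buffer are the
 caller's, one bit per page in the slot, error handling omitted):

	struct kvm_dirty_log log = {
		.slot = slot_id,
		.dirty_bitmap = bitmap,
	};

	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);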
6136 */ 6137 struct kvm_vcpu *vcpu; 6138 unsigned long i; 6139 6140 kvm_for_each_vcpu(i, vcpu, kvm) 6141 kvm_vcpu_kick(vcpu); 6142 } 6143 6144 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, 6145 bool line_status) 6146 { 6147 if (!irqchip_in_kernel(kvm)) 6148 return -ENXIO; 6149 6150 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 6151 irq_event->irq, irq_event->level, 6152 line_status); 6153 return 0; 6154 } 6155 6156 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, 6157 struct kvm_enable_cap *cap) 6158 { 6159 int r; 6160 6161 if (cap->flags) 6162 return -EINVAL; 6163 6164 switch (cap->cap) { 6165 case KVM_CAP_DISABLE_QUIRKS2: 6166 r = -EINVAL; 6167 if (cap->args[0] & ~KVM_X86_VALID_QUIRKS) 6168 break; 6169 fallthrough; 6170 case KVM_CAP_DISABLE_QUIRKS: 6171 kvm->arch.disabled_quirks = cap->args[0]; 6172 r = 0; 6173 break; 6174 case KVM_CAP_SPLIT_IRQCHIP: { 6175 mutex_lock(&kvm->lock); 6176 r = -EINVAL; 6177 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) 6178 goto split_irqchip_unlock; 6179 r = -EEXIST; 6180 if (irqchip_in_kernel(kvm)) 6181 goto split_irqchip_unlock; 6182 if (kvm->created_vcpus) 6183 goto split_irqchip_unlock; 6184 r = kvm_setup_empty_irq_routing(kvm); 6185 if (r) 6186 goto split_irqchip_unlock; 6187 /* Pairs with irqchip_in_kernel. */ 6188 smp_wmb(); 6189 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; 6190 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; 6191 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6192 r = 0; 6193 split_irqchip_unlock: 6194 mutex_unlock(&kvm->lock); 6195 break; 6196 } 6197 case KVM_CAP_X2APIC_API: 6198 r = -EINVAL; 6199 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) 6200 break; 6201 6202 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) 6203 kvm->arch.x2apic_format = true; 6204 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) 6205 kvm->arch.x2apic_broadcast_quirk_disabled = true; 6206 6207 r = 0; 6208 break; 6209 case KVM_CAP_X86_DISABLE_EXITS: 6210 r = -EINVAL; 6211 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) 6212 break; 6213 6214 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && 6215 kvm_can_mwait_in_guest()) 6216 kvm->arch.mwait_in_guest = true; 6217 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) 6218 kvm->arch.hlt_in_guest = true; 6219 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) 6220 kvm->arch.pause_in_guest = true; 6221 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) 6222 kvm->arch.cstate_in_guest = true; 6223 r = 0; 6224 break; 6225 case KVM_CAP_MSR_PLATFORM_INFO: 6226 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; 6227 r = 0; 6228 break; 6229 case KVM_CAP_EXCEPTION_PAYLOAD: 6230 kvm->arch.exception_payload_enabled = cap->args[0]; 6231 r = 0; 6232 break; 6233 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 6234 kvm->arch.triple_fault_event = cap->args[0]; 6235 r = 0; 6236 break; 6237 case KVM_CAP_X86_USER_SPACE_MSR: 6238 r = -EINVAL; 6239 if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL | 6240 KVM_MSR_EXIT_REASON_UNKNOWN | 6241 KVM_MSR_EXIT_REASON_FILTER)) 6242 break; 6243 kvm->arch.user_space_msr_mask = cap->args[0]; 6244 r = 0; 6245 break; 6246 case KVM_CAP_X86_BUS_LOCK_EXIT: 6247 r = -EINVAL; 6248 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) 6249 break; 6250 6251 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && 6252 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) 6253 break; 6254 6255 if (kvm_caps.has_bus_lock_exit && 6256 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) 6257 kvm->arch.bus_lock_detection_enabled = true; 6258 r = 0; 6259 break; 6260 #ifdef 
CONFIG_X86_SGX_KVM 6261 case KVM_CAP_SGX_ATTRIBUTE: { 6262 unsigned long allowed_attributes = 0; 6263 6264 r = sgx_set_attribute(&allowed_attributes, cap->args[0]); 6265 if (r) 6266 break; 6267 6268 /* KVM only supports the PROVISIONKEY privileged attribute. */ 6269 if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) && 6270 !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY)) 6271 kvm->arch.sgx_provisioning_allowed = true; 6272 else 6273 r = -EINVAL; 6274 break; 6275 } 6276 #endif 6277 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 6278 r = -EINVAL; 6279 if (!kvm_x86_ops.vm_copy_enc_context_from) 6280 break; 6281 6282 r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]); 6283 break; 6284 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 6285 r = -EINVAL; 6286 if (!kvm_x86_ops.vm_move_enc_context_from) 6287 break; 6288 6289 r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]); 6290 break; 6291 case KVM_CAP_EXIT_HYPERCALL: 6292 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) { 6293 r = -EINVAL; 6294 break; 6295 } 6296 kvm->arch.hypercall_exit_enabled = cap->args[0]; 6297 r = 0; 6298 break; 6299 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 6300 r = -EINVAL; 6301 if (cap->args[0] & ~1) 6302 break; 6303 kvm->arch.exit_on_emulation_error = cap->args[0]; 6304 r = 0; 6305 break; 6306 case KVM_CAP_PMU_CAPABILITY: 6307 r = -EINVAL; 6308 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK)) 6309 break; 6310 6311 mutex_lock(&kvm->lock); 6312 if (!kvm->created_vcpus) { 6313 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE); 6314 r = 0; 6315 } 6316 mutex_unlock(&kvm->lock); 6317 break; 6318 case KVM_CAP_MAX_VCPU_ID: 6319 r = -EINVAL; 6320 if (cap->args[0] > KVM_MAX_VCPU_IDS) 6321 break; 6322 6323 mutex_lock(&kvm->lock); 6324 if (kvm->arch.max_vcpu_ids == cap->args[0]) { 6325 r = 0; 6326 } else if (!kvm->arch.max_vcpu_ids) { 6327 kvm->arch.max_vcpu_ids = cap->args[0]; 6328 r = 0; 6329 } 6330 mutex_unlock(&kvm->lock); 6331 break; 6332 case KVM_CAP_X86_NOTIFY_VMEXIT: 6333 r = -EINVAL; 6334 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS) 6335 break; 6336 if (!kvm_caps.has_notify_vmexit) 6337 break; 6338 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED)) 6339 break; 6340 mutex_lock(&kvm->lock); 6341 if (!kvm->created_vcpus) { 6342 kvm->arch.notify_window = cap->args[0] >> 32; 6343 kvm->arch.notify_vmexit_flags = (u32)cap->args[0]; 6344 r = 0; 6345 } 6346 mutex_unlock(&kvm->lock); 6347 break; 6348 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 6349 r = -EINVAL; 6350 6351 /* 6352 * Since the risk of disabling NX hugepages is a guest crashing 6353 * the system, ensure the userspace process has permission to 6354 * reboot the system. 6355 * 6356 * Note that unlike the reboot() syscall, the process must have 6357 * this capability in the root namespace because exposing 6358 * /dev/kvm into a container does not limit the scope of the 6359 * iTLB multihit bug to that container. In other words, 6360 * this must use capable(), not ns_capable(). 
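 A privileged VMM opts in roughly as follows (userspace sketch; the
 process must hold CAP_SYS_BOOT in the initial namespace, no vCPUs may
 exist yet, and cap.args[0] must be zero, as checked below):

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_VM_DISABLE_NX_HUGE_PAGES,
	};

	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);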
6361 */ 6362 if (!capable(CAP_SYS_BOOT)) { 6363 r = -EPERM; 6364 break; 6365 } 6366 6367 if (cap->args[0]) 6368 break; 6369 6370 mutex_lock(&kvm->lock); 6371 if (!kvm->created_vcpus) { 6372 kvm->arch.disable_nx_huge_pages = true; 6373 r = 0; 6374 } 6375 mutex_unlock(&kvm->lock); 6376 break; 6377 default: 6378 r = -EINVAL; 6379 break; 6380 } 6381 return r; 6382 } 6383 6384 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow) 6385 { 6386 struct kvm_x86_msr_filter *msr_filter; 6387 6388 msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT); 6389 if (!msr_filter) 6390 return NULL; 6391 6392 msr_filter->default_allow = default_allow; 6393 return msr_filter; 6394 } 6395 6396 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter) 6397 { 6398 u32 i; 6399 6400 if (!msr_filter) 6401 return; 6402 6403 for (i = 0; i < msr_filter->count; i++) 6404 kfree(msr_filter->ranges[i].bitmap); 6405 6406 kfree(msr_filter); 6407 } 6408 6409 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, 6410 struct kvm_msr_filter_range *user_range) 6411 { 6412 unsigned long *bitmap = NULL; 6413 size_t bitmap_size; 6414 6415 if (!user_range->nmsrs) 6416 return 0; 6417 6418 if (user_range->flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)) 6419 return -EINVAL; 6420 6421 if (!user_range->flags) 6422 return -EINVAL; 6423 6424 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); 6425 if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE) 6426 return -EINVAL; 6427 6428 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); 6429 if (IS_ERR(bitmap)) 6430 return PTR_ERR(bitmap); 6431 6432 msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) { 6433 .flags = user_range->flags, 6434 .base = user_range->base, 6435 .nmsrs = user_range->nmsrs, 6436 .bitmap = bitmap, 6437 }; 6438 6439 msr_filter->count++; 6440 return 0; 6441 } 6442 6443 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, 6444 struct kvm_msr_filter *filter) 6445 { 6446 struct kvm_x86_msr_filter *new_filter, *old_filter; 6447 bool default_allow; 6448 bool empty = true; 6449 int r = 0; 6450 u32 i; 6451 6452 if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY) 6453 return -EINVAL; 6454 6455 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) 6456 empty &= !filter->ranges[i].nmsrs; 6457 6458 default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); 6459 if (empty && !default_allow) 6460 return -EINVAL; 6461 6462 new_filter = kvm_alloc_msr_filter(default_allow); 6463 if (!new_filter) 6464 return -ENOMEM; 6465 6466 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { 6467 r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); 6468 if (r) { 6469 kvm_free_msr_filter(new_filter); 6470 return r; 6471 } 6472 } 6473 6474 mutex_lock(&kvm->lock); 6475 6476 /* The per-VM filter is protected by kvm->lock... 
*/ 6477 old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); 6478 6479 rcu_assign_pointer(kvm->arch.msr_filter, new_filter); 6480 synchronize_srcu(&kvm->srcu); 6481 6482 kvm_free_msr_filter(old_filter); 6483 6484 kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED); 6485 mutex_unlock(&kvm->lock); 6486 6487 return 0; 6488 } 6489 6490 #ifdef CONFIG_KVM_COMPAT 6491 /* for KVM_X86_SET_MSR_FILTER */ 6492 struct kvm_msr_filter_range_compat { 6493 __u32 flags; 6494 __u32 nmsrs; 6495 __u32 base; 6496 __u32 bitmap; 6497 }; 6498 6499 struct kvm_msr_filter_compat { 6500 __u32 flags; 6501 struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES]; 6502 }; 6503 6504 #define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat) 6505 6506 long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, 6507 unsigned long arg) 6508 { 6509 void __user *argp = (void __user *)arg; 6510 struct kvm *kvm = filp->private_data; 6511 long r = -ENOTTY; 6512 6513 switch (ioctl) { 6514 case KVM_X86_SET_MSR_FILTER_COMPAT: { 6515 struct kvm_msr_filter __user *user_msr_filter = argp; 6516 struct kvm_msr_filter_compat filter_compat; 6517 struct kvm_msr_filter filter; 6518 int i; 6519 6520 if (copy_from_user(&filter_compat, user_msr_filter, 6521 sizeof(filter_compat))) 6522 return -EFAULT; 6523 6524 filter.flags = filter_compat.flags; 6525 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { 6526 struct kvm_msr_filter_range_compat *cr; 6527 6528 cr = &filter_compat.ranges[i]; 6529 filter.ranges[i] = (struct kvm_msr_filter_range) { 6530 .flags = cr->flags, 6531 .nmsrs = cr->nmsrs, 6532 .base = cr->base, 6533 .bitmap = (__u8 *)(ulong)cr->bitmap, 6534 }; 6535 } 6536 6537 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); 6538 break; 6539 } 6540 } 6541 6542 return r; 6543 } 6544 #endif 6545 6546 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 6547 static int kvm_arch_suspend_notifier(struct kvm *kvm) 6548 { 6549 struct kvm_vcpu *vcpu; 6550 unsigned long i; 6551 int ret = 0; 6552 6553 mutex_lock(&kvm->lock); 6554 kvm_for_each_vcpu(i, vcpu, kvm) { 6555 if (!vcpu->arch.pv_time.active) 6556 continue; 6557 6558 ret = kvm_set_guest_paused(vcpu); 6559 if (ret) { 6560 kvm_err("Failed to pause guest VCPU%d: %d\n", 6561 vcpu->vcpu_id, ret); 6562 break; 6563 } 6564 } 6565 mutex_unlock(&kvm->lock); 6566 6567 return ret ? NOTIFY_BAD : NOTIFY_DONE; 6568 } 6569 6570 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state) 6571 { 6572 switch (state) { 6573 case PM_HIBERNATION_PREPARE: 6574 case PM_SUSPEND_PREPARE: 6575 return kvm_arch_suspend_notifier(kvm); 6576 } 6577 6578 return NOTIFY_DONE; 6579 } 6580 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 6581 6582 static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp) 6583 { 6584 struct kvm_clock_data data = { 0 }; 6585 6586 get_kvmclock(kvm, &data); 6587 if (copy_to_user(argp, &data, sizeof(data))) 6588 return -EFAULT; 6589 6590 return 0; 6591 } 6592 6593 static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp) 6594 { 6595 struct kvm_arch *ka = &kvm->arch; 6596 struct kvm_clock_data data; 6597 u64 now_raw_ns; 6598 6599 if (copy_from_user(&data, argp, sizeof(data))) 6600 return -EFAULT; 6601 6602 /* 6603 * Only KVM_CLOCK_REALTIME is used, but allow passing the 6604 * result of KVM_GET_CLOCK back to KVM_SET_CLOCK. 
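 A snapshot/restore pair from userspace therefore looks roughly like the
 following sketch (error handling omitted):

	struct kvm_clock_data data;

	ioctl(vm_fd, KVM_GET_CLOCK, &data);
	... save, migrate, restore ...
	ioctl(vm_fd, KVM_SET_CLOCK, &data);

 When KVM_CLOCK_REALTIME is set in data.flags, the wall-clock time that
 elapsed since the save is added back to data.clock below, so kvmclock
 does not appear frozen across the downtime.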
6605 */ 6606 if (data.flags & ~KVM_CLOCK_VALID_FLAGS) 6607 return -EINVAL; 6608 6609 kvm_hv_request_tsc_page_update(kvm); 6610 kvm_start_pvclock_update(kvm); 6611 pvclock_update_vm_gtod_copy(kvm); 6612 6613 /* 6614 * This pairs with kvm_guest_time_update(): when masterclock is 6615 * in use, we use master_kernel_ns + kvmclock_offset to set 6616 * unsigned 'system_time' so if we use get_kvmclock_ns() (which 6617 * is slightly ahead) here we risk going negative on unsigned 6618 * 'system_time' when 'data.clock' is very small. 6619 */ 6620 if (data.flags & KVM_CLOCK_REALTIME) { 6621 u64 now_real_ns = ktime_get_real_ns(); 6622 6623 /* 6624 * Avoid stepping the kvmclock backwards. 6625 */ 6626 if (now_real_ns > data.realtime) 6627 data.clock += now_real_ns - data.realtime; 6628 } 6629 6630 if (ka->use_master_clock) 6631 now_raw_ns = ka->master_kernel_ns; 6632 else 6633 now_raw_ns = get_kvmclock_base_ns(); 6634 ka->kvmclock_offset = data.clock - now_raw_ns; 6635 kvm_end_pvclock_update(kvm); 6636 return 0; 6637 } 6638 6639 long kvm_arch_vm_ioctl(struct file *filp, 6640 unsigned int ioctl, unsigned long arg) 6641 { 6642 struct kvm *kvm = filp->private_data; 6643 void __user *argp = (void __user *)arg; 6644 int r = -ENOTTY; 6645 /* 6646 * This union makes it completely explicit to gcc-3.x 6647 * that these two variables' stack usage should be 6648 * combined, not added together. 6649 */ 6650 union { 6651 struct kvm_pit_state ps; 6652 struct kvm_pit_state2 ps2; 6653 struct kvm_pit_config pit_config; 6654 } u; 6655 6656 switch (ioctl) { 6657 case KVM_SET_TSS_ADDR: 6658 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); 6659 break; 6660 case KVM_SET_IDENTITY_MAP_ADDR: { 6661 u64 ident_addr; 6662 6663 mutex_lock(&kvm->lock); 6664 r = -EINVAL; 6665 if (kvm->created_vcpus) 6666 goto set_identity_unlock; 6667 r = -EFAULT; 6668 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr))) 6669 goto set_identity_unlock; 6670 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 6671 set_identity_unlock: 6672 mutex_unlock(&kvm->lock); 6673 break; 6674 } 6675 case KVM_SET_NR_MMU_PAGES: 6676 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); 6677 break; 6678 case KVM_GET_NR_MMU_PAGES: 6679 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); 6680 break; 6681 case KVM_CREATE_IRQCHIP: { 6682 mutex_lock(&kvm->lock); 6683 6684 r = -EEXIST; 6685 if (irqchip_in_kernel(kvm)) 6686 goto create_irqchip_unlock; 6687 6688 r = -EINVAL; 6689 if (kvm->created_vcpus) 6690 goto create_irqchip_unlock; 6691 6692 r = kvm_pic_init(kvm); 6693 if (r) 6694 goto create_irqchip_unlock; 6695 6696 r = kvm_ioapic_init(kvm); 6697 if (r) { 6698 kvm_pic_destroy(kvm); 6699 goto create_irqchip_unlock; 6700 } 6701 6702 r = kvm_setup_default_irq_routing(kvm); 6703 if (r) { 6704 kvm_ioapic_destroy(kvm); 6705 kvm_pic_destroy(kvm); 6706 goto create_irqchip_unlock; 6707 } 6708 /* Write kvm->irq_routing before enabling irqchip_in_kernel. 
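 The reader side pairs with this barrier: irqchip_in_kernel() does,
 roughly (see the matching smp_rmb() in arch/x86/kvm/irq.h),

	mode = kvm->arch.irqchip_mode;
	smp_rmb();
	return mode != KVM_IRQCHIP_NONE;

 so a caller that observes the irqchip as present also observes the
 routing table published above.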
*/ 6709 smp_wmb(); 6710 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; 6711 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6712 create_irqchip_unlock: 6713 mutex_unlock(&kvm->lock); 6714 break; 6715 } 6716 case KVM_CREATE_PIT: 6717 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; 6718 goto create_pit; 6719 case KVM_CREATE_PIT2: 6720 r = -EFAULT; 6721 if (copy_from_user(&u.pit_config, argp, 6722 sizeof(struct kvm_pit_config))) 6723 goto out; 6724 create_pit: 6725 mutex_lock(&kvm->lock); 6726 r = -EEXIST; 6727 if (kvm->arch.vpit) 6728 goto create_pit_unlock; 6729 r = -ENOMEM; 6730 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); 6731 if (kvm->arch.vpit) 6732 r = 0; 6733 create_pit_unlock: 6734 mutex_unlock(&kvm->lock); 6735 break; 6736 case KVM_GET_IRQCHIP: { 6737 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6738 struct kvm_irqchip *chip; 6739 6740 chip = memdup_user(argp, sizeof(*chip)); 6741 if (IS_ERR(chip)) { 6742 r = PTR_ERR(chip); 6743 goto out; 6744 } 6745 6746 r = -ENXIO; 6747 if (!irqchip_kernel(kvm)) 6748 goto get_irqchip_out; 6749 r = kvm_vm_ioctl_get_irqchip(kvm, chip); 6750 if (r) 6751 goto get_irqchip_out; 6752 r = -EFAULT; 6753 if (copy_to_user(argp, chip, sizeof(*chip))) 6754 goto get_irqchip_out; 6755 r = 0; 6756 get_irqchip_out: 6757 kfree(chip); 6758 break; 6759 } 6760 case KVM_SET_IRQCHIP: { 6761 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6762 struct kvm_irqchip *chip; 6763 6764 chip = memdup_user(argp, sizeof(*chip)); 6765 if (IS_ERR(chip)) { 6766 r = PTR_ERR(chip); 6767 goto out; 6768 } 6769 6770 r = -ENXIO; 6771 if (!irqchip_kernel(kvm)) 6772 goto set_irqchip_out; 6773 r = kvm_vm_ioctl_set_irqchip(kvm, chip); 6774 set_irqchip_out: 6775 kfree(chip); 6776 break; 6777 } 6778 case KVM_GET_PIT: { 6779 r = -EFAULT; 6780 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) 6781 goto out; 6782 r = -ENXIO; 6783 if (!kvm->arch.vpit) 6784 goto out; 6785 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); 6786 if (r) 6787 goto out; 6788 r = -EFAULT; 6789 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) 6790 goto out; 6791 r = 0; 6792 break; 6793 } 6794 case KVM_SET_PIT: { 6795 r = -EFAULT; 6796 if (copy_from_user(&u.ps, argp, sizeof(u.ps))) 6797 goto out; 6798 mutex_lock(&kvm->lock); 6799 r = -ENXIO; 6800 if (!kvm->arch.vpit) 6801 goto set_pit_out; 6802 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); 6803 set_pit_out: 6804 mutex_unlock(&kvm->lock); 6805 break; 6806 } 6807 case KVM_GET_PIT2: { 6808 r = -ENXIO; 6809 if (!kvm->arch.vpit) 6810 goto out; 6811 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); 6812 if (r) 6813 goto out; 6814 r = -EFAULT; 6815 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) 6816 goto out; 6817 r = 0; 6818 break; 6819 } 6820 case KVM_SET_PIT2: { 6821 r = -EFAULT; 6822 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) 6823 goto out; 6824 mutex_lock(&kvm->lock); 6825 r = -ENXIO; 6826 if (!kvm->arch.vpit) 6827 goto set_pit2_out; 6828 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); 6829 set_pit2_out: 6830 mutex_unlock(&kvm->lock); 6831 break; 6832 } 6833 case KVM_REINJECT_CONTROL: { 6834 struct kvm_reinject_control control; 6835 r = -EFAULT; 6836 if (copy_from_user(&control, argp, sizeof(control))) 6837 goto out; 6838 r = -ENXIO; 6839 if (!kvm->arch.vpit) 6840 goto out; 6841 r = kvm_vm_ioctl_reinject(kvm, &control); 6842 break; 6843 } 6844 case KVM_SET_BOOT_CPU_ID: 6845 r = 0; 6846 mutex_lock(&kvm->lock); 6847 if (kvm->created_vcpus) 6848 r = -EBUSY; 6849 else 6850 kvm->arch.bsp_vcpu_id = arg; 6851 mutex_unlock(&kvm->lock); 6852 break; 6853 #ifdef CONFIG_KVM_XEN 
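	/*
	 * The cases below back KVM's Xen guest support. As a usage
	 * illustration only (userspace sketch; XEN_HCALL_MSR stands in
	 * for whatever MSR index the VMM advertises to the guest for the
	 * hypercall page):
	 *
	 *	struct kvm_xen_hvm_config xhc = {
	 *		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
	 *		.msr = XEN_HCALL_MSR,
	 *	};
	 *
	 *	ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &xhc);
	 */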
6854 case KVM_XEN_HVM_CONFIG: {
6855 struct kvm_xen_hvm_config xhc;
6856 r = -EFAULT;
6857 if (copy_from_user(&xhc, argp, sizeof(xhc)))
6858 goto out;
6859 r = kvm_xen_hvm_config(kvm, &xhc);
6860 break;
6861 }
6862 case KVM_XEN_HVM_GET_ATTR: {
6863 struct kvm_xen_hvm_attr xha;
6864
6865 r = -EFAULT;
6866 if (copy_from_user(&xha, argp, sizeof(xha)))
6867 goto out;
6868 r = kvm_xen_hvm_get_attr(kvm, &xha);
6869 if (!r && copy_to_user(argp, &xha, sizeof(xha)))
6870 r = -EFAULT;
6871 break;
6872 }
6873 case KVM_XEN_HVM_SET_ATTR: {
6874 struct kvm_xen_hvm_attr xha;
6875
6876 r = -EFAULT;
6877 if (copy_from_user(&xha, argp, sizeof(xha)))
6878 goto out;
6879 r = kvm_xen_hvm_set_attr(kvm, &xha);
6880 break;
6881 }
6882 case KVM_XEN_HVM_EVTCHN_SEND: {
6883 struct kvm_irq_routing_xen_evtchn uxe;
6884
6885 r = -EFAULT;
6886 if (copy_from_user(&uxe, argp, sizeof(uxe)))
6887 goto out;
6888 r = kvm_xen_hvm_evtchn_send(kvm, &uxe);
6889 break;
6890 }
6891 #endif
6892 case KVM_SET_CLOCK:
6893 r = kvm_vm_ioctl_set_clock(kvm, argp);
6894 break;
6895 case KVM_GET_CLOCK:
6896 r = kvm_vm_ioctl_get_clock(kvm, argp);
6897 break;
6898 case KVM_SET_TSC_KHZ: {
6899 u32 user_tsc_khz;
6900
6901 r = -EINVAL;
6902 user_tsc_khz = (u32)arg;
6903
6904 if (kvm_caps.has_tsc_control &&
6905 user_tsc_khz >= kvm_caps.max_guest_tsc_khz)
6906 goto out;
6907
6908 if (user_tsc_khz == 0)
6909 user_tsc_khz = tsc_khz;
6910
6911 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
6912 r = 0;
6913
6914 goto out;
6915 }
6916 case KVM_GET_TSC_KHZ: {
6917 r = READ_ONCE(kvm->arch.default_tsc_khz);
6918 goto out;
6919 }
6920 case KVM_MEMORY_ENCRYPT_OP: {
6921 r = -ENOTTY;
6922 if (!kvm_x86_ops.mem_enc_ioctl)
6923 goto out;
6924
6925 r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp);
6926 break;
6927 }
6928 case KVM_MEMORY_ENCRYPT_REG_REGION: {
6929 struct kvm_enc_region region;
6930
6931 r = -EFAULT;
6932 if (copy_from_user(&region, argp, sizeof(region)))
6933 goto out;
6934
6935 r = -ENOTTY;
6936 if (!kvm_x86_ops.mem_enc_register_region)
6937 goto out;
6938
6939 r = static_call(kvm_x86_mem_enc_register_region)(kvm, &region);
6940 break;
6941 }
6942 case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
6943 struct kvm_enc_region region;
6944
6945 r = -EFAULT;
6946 if (copy_from_user(&region, argp, sizeof(region)))
6947 goto out;
6948
6949 r = -ENOTTY;
6950 if (!kvm_x86_ops.mem_enc_unregister_region)
6951 goto out;
6952
6953 r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region);
6954 break;
6955 }
6956 case KVM_HYPERV_EVENTFD: {
6957 struct kvm_hyperv_eventfd hvevfd;
6958
6959 r = -EFAULT;
6960 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
6961 goto out;
6962 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
6963 break;
6964 }
6965 case KVM_SET_PMU_EVENT_FILTER:
6966 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
6967 break;
6968 case KVM_X86_SET_MSR_FILTER: {
6969 struct kvm_msr_filter __user *user_msr_filter = argp;
6970 struct kvm_msr_filter filter;
6971
6972 if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
6973 return -EFAULT;
6974
6975 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
6976 break;
6977 }
6978 default:
6979 r = -ENOTTY;
6980 }
6981 out:
6982 return r;
6983 }
6984
6985 static void kvm_init_msr_list(void)
6986 {
6987 u32 dummy[2];
6988 unsigned i;
6989
6990 BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3,
6991 "Please update the fixed PMCs in msrs_to_save_all[]");
6992
6993 num_msrs_to_save = 0;
6994 num_emulated_msrs = 0;
6995 num_msr_based_features = 0;
6996
6997 for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
6998 if
(rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0) 6999 continue; 7000 7001 /* 7002 * Even MSRs that are valid in the host may not be exposed 7003 * to the guests in some cases. 7004 */ 7005 switch (msrs_to_save_all[i]) { 7006 case MSR_IA32_BNDCFGS: 7007 if (!kvm_mpx_supported()) 7008 continue; 7009 break; 7010 case MSR_TSC_AUX: 7011 if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) && 7012 !kvm_cpu_cap_has(X86_FEATURE_RDPID)) 7013 continue; 7014 break; 7015 case MSR_IA32_UMWAIT_CONTROL: 7016 if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) 7017 continue; 7018 break; 7019 case MSR_IA32_RTIT_CTL: 7020 case MSR_IA32_RTIT_STATUS: 7021 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) 7022 continue; 7023 break; 7024 case MSR_IA32_RTIT_CR3_MATCH: 7025 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7026 !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering)) 7027 continue; 7028 break; 7029 case MSR_IA32_RTIT_OUTPUT_BASE: 7030 case MSR_IA32_RTIT_OUTPUT_MASK: 7031 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7032 (!intel_pt_validate_hw_cap(PT_CAP_topa_output) && 7033 !intel_pt_validate_hw_cap(PT_CAP_single_range_output))) 7034 continue; 7035 break; 7036 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 7037 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7038 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >= 7039 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2) 7040 continue; 7041 break; 7042 case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX: 7043 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= 7044 min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp)) 7045 continue; 7046 break; 7047 case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX: 7048 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= 7049 min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp)) 7050 continue; 7051 break; 7052 case MSR_IA32_XFD: 7053 case MSR_IA32_XFD_ERR: 7054 if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) 7055 continue; 7056 break; 7057 default: 7058 break; 7059 } 7060 7061 msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i]; 7062 } 7063 7064 for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { 7065 if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i])) 7066 continue; 7067 7068 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; 7069 } 7070 7071 for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) { 7072 struct kvm_msr_entry msr; 7073 7074 msr.index = msr_based_features_all[i]; 7075 if (kvm_get_msr_feature(&msr)) 7076 continue; 7077 7078 msr_based_features[num_msr_based_features++] = msr_based_features_all[i]; 7079 } 7080 } 7081 7082 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, 7083 const void *v) 7084 { 7085 int handled = 0; 7086 int n; 7087 7088 do { 7089 n = min(len, 8); 7090 if (!(lapic_in_kernel(vcpu) && 7091 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) 7092 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) 7093 break; 7094 handled += n; 7095 addr += n; 7096 len -= n; 7097 v += n; 7098 } while (len); 7099 7100 return handled; 7101 } 7102 7103 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) 7104 { 7105 int handled = 0; 7106 int n; 7107 7108 do { 7109 n = min(len, 8); 7110 if (!(lapic_in_kernel(vcpu) && 7111 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, 7112 addr, n, v)) 7113 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) 7114 break; 7115 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); 7116 handled += n; 7117 addr += n; 7118 len -= n; 7119 v += n; 7120 } while (len); 7121 
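	/*
	 * Example: a 16-byte read is issued above as two 8-byte accesses;
	 * if in-kernel devices satisfy only the first, 'handled' is 8 and
	 * the caller must complete the remaining bytes via a userspace
	 * MMIO exit.
	 */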
7122 return handled; 7123 } 7124 7125 static void kvm_set_segment(struct kvm_vcpu *vcpu, 7126 struct kvm_segment *var, int seg) 7127 { 7128 static_call(kvm_x86_set_segment)(vcpu, var, seg); 7129 } 7130 7131 void kvm_get_segment(struct kvm_vcpu *vcpu, 7132 struct kvm_segment *var, int seg) 7133 { 7134 static_call(kvm_x86_get_segment)(vcpu, var, seg); 7135 } 7136 7137 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, 7138 struct x86_exception *exception) 7139 { 7140 struct kvm_mmu *mmu = vcpu->arch.mmu; 7141 gpa_t t_gpa; 7142 7143 BUG_ON(!mmu_is_nested(vcpu)); 7144 7145 /* NPT walks are always user-walks */ 7146 access |= PFERR_USER_MASK; 7147 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception); 7148 7149 return t_gpa; 7150 } 7151 7152 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 7153 struct x86_exception *exception) 7154 { 7155 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7156 7157 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7158 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7159 } 7160 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); 7161 7162 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, 7163 struct x86_exception *exception) 7164 { 7165 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7166 7167 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7168 access |= PFERR_FETCH_MASK; 7169 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7170 } 7171 7172 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 7173 struct x86_exception *exception) 7174 { 7175 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7176 7177 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7178 access |= PFERR_WRITE_MASK; 7179 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7180 } 7181 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); 7182 7183 /* uses this to access any guest's mapped memory without checking CPL */ 7184 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, 7185 struct x86_exception *exception) 7186 { 7187 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7188 7189 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); 7190 } 7191 7192 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 7193 struct kvm_vcpu *vcpu, u64 access, 7194 struct x86_exception *exception) 7195 { 7196 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7197 void *data = val; 7198 int r = X86EMUL_CONTINUE; 7199 7200 while (bytes) { 7201 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 7202 unsigned offset = addr & (PAGE_SIZE-1); 7203 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 7204 int ret; 7205 7206 if (gpa == INVALID_GPA) 7207 return X86EMUL_PROPAGATE_FAULT; 7208 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, 7209 offset, toread); 7210 if (ret < 0) { 7211 r = X86EMUL_IO_NEEDED; 7212 goto out; 7213 } 7214 7215 bytes -= toread; 7216 data += toread; 7217 addr += toread; 7218 } 7219 out: 7220 return r; 7221 } 7222 7223 /* used for instruction fetching */ 7224 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, 7225 gva_t addr, void *val, unsigned int bytes, 7226 struct x86_exception *exception) 7227 { 7228 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7229 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7230 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7231 unsigned offset; 7232 int ret; 7233 7234 /* Inline kvm_read_guest_virt_helper for speed. 
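 This fast path does a single translation and a single page read; the
 WARN_ON() below guards the assumption that a fetch request never spans
 a page. E.g. for addr = 0x401ffe, offset is 0xffe, so a 4-byte request
 on the WARN path would be clamped to the 2 bytes left in the page.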
*/ 7235 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK, 7236 exception); 7237 if (unlikely(gpa == INVALID_GPA)) 7238 return X86EMUL_PROPAGATE_FAULT; 7239 7240 offset = addr & (PAGE_SIZE-1); 7241 if (WARN_ON(offset + bytes > PAGE_SIZE)) 7242 bytes = (unsigned)PAGE_SIZE - offset; 7243 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, 7244 offset, bytes); 7245 if (unlikely(ret < 0)) 7246 return X86EMUL_IO_NEEDED; 7247 7248 return X86EMUL_CONTINUE; 7249 } 7250 7251 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, 7252 gva_t addr, void *val, unsigned int bytes, 7253 struct x86_exception *exception) 7254 { 7255 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7256 7257 /* 7258 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED 7259 * is returned, but our callers are not ready for that and they blindly 7260 * call kvm_inject_page_fault. Ensure that they at least do not leak 7261 * uninitialized kernel stack memory into cr2 and error code. 7262 */ 7263 memset(exception, 0, sizeof(*exception)); 7264 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 7265 exception); 7266 } 7267 EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 7268 7269 static int emulator_read_std(struct x86_emulate_ctxt *ctxt, 7270 gva_t addr, void *val, unsigned int bytes, 7271 struct x86_exception *exception, bool system) 7272 { 7273 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7274 u64 access = 0; 7275 7276 if (system) 7277 access |= PFERR_IMPLICIT_ACCESS; 7278 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7279 access |= PFERR_USER_MASK; 7280 7281 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); 7282 } 7283 7284 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, 7285 unsigned long addr, void *val, unsigned int bytes) 7286 { 7287 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7288 int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes); 7289 7290 return r < 0 ? 
X86EMUL_IO_NEEDED : X86EMUL_CONTINUE; 7291 } 7292 7293 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 7294 struct kvm_vcpu *vcpu, u64 access, 7295 struct x86_exception *exception) 7296 { 7297 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7298 void *data = val; 7299 int r = X86EMUL_CONTINUE; 7300 7301 while (bytes) { 7302 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 7303 unsigned offset = addr & (PAGE_SIZE-1); 7304 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 7305 int ret; 7306 7307 if (gpa == INVALID_GPA) 7308 return X86EMUL_PROPAGATE_FAULT; 7309 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); 7310 if (ret < 0) { 7311 r = X86EMUL_IO_NEEDED; 7312 goto out; 7313 } 7314 7315 bytes -= towrite; 7316 data += towrite; 7317 addr += towrite; 7318 } 7319 out: 7320 return r; 7321 } 7322 7323 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, 7324 unsigned int bytes, struct x86_exception *exception, 7325 bool system) 7326 { 7327 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7328 u64 access = PFERR_WRITE_MASK; 7329 7330 if (system) 7331 access |= PFERR_IMPLICIT_ACCESS; 7332 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7333 access |= PFERR_USER_MASK; 7334 7335 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7336 access, exception); 7337 } 7338 7339 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, 7340 unsigned int bytes, struct x86_exception *exception) 7341 { 7342 /* kvm_write_guest_virt_system can pull in tons of pages. */ 7343 vcpu->arch.l1tf_flush_l1d = true; 7344 7345 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7346 PFERR_WRITE_MASK, exception); 7347 } 7348 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 7349 7350 static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type, 7351 void *insn, int insn_len) 7352 { 7353 return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type, 7354 insn, insn_len); 7355 } 7356 7357 int handle_ud(struct kvm_vcpu *vcpu) 7358 { 7359 static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX }; 7360 int fep_flags = READ_ONCE(force_emulation_prefix); 7361 int emul_type = EMULTYPE_TRAP_UD; 7362 char sig[5]; /* ud2; .ascii "kvm" */ 7363 struct x86_exception e; 7364 7365 if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0))) 7366 return 1; 7367 7368 if (fep_flags && 7369 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), 7370 sig, sizeof(sig), &e) == 0 && 7371 memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) { 7372 if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF) 7373 kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF); 7374 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); 7375 emul_type = EMULTYPE_TRAP_UD_FORCED; 7376 } 7377 7378 return kvm_emulate_instruction(vcpu, emul_type); 7379 } 7380 EXPORT_SYMBOL_GPL(handle_ud); 7381 7382 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7383 gpa_t gpa, bool write) 7384 { 7385 /* For APIC access vmexit */ 7386 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7387 return 1; 7388 7389 if (vcpu_match_mmio_gpa(vcpu, gpa)) { 7390 trace_vcpu_match_mmio(gva, gpa, write, true); 7391 return 1; 7392 } 7393 7394 return 0; 7395 } 7396 7397 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7398 gpa_t *gpa, struct x86_exception *exception, 7399 bool write) 7400 { 7401 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7402 u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? 
PFERR_USER_MASK : 0)
7403 | (write ? PFERR_WRITE_MASK : 0);
7404
7405 /*
7406 * Currently, PKRU is only applied to EPT-enabled guests, so there
7407 * are no protection keys in the EPT page tables for an L1 guest,
7408 * nor in the EPT shadow page tables for an L2 guest.
7409 */
7410 if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) ||
7411 !permission_fault(vcpu, vcpu->arch.walk_mmu,
7412 vcpu->arch.mmio_access, 0, access))) {
7413 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
7414 (gva & (PAGE_SIZE - 1));
7415 trace_vcpu_match_mmio(gva, *gpa, write, false);
7416 return 1;
7417 }
7418
7419 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7420
7421 if (*gpa == INVALID_GPA)
7422 return -1;
7423
7424 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
7425 }
7426
7427 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
7428 const void *val, int bytes)
7429 {
7430 int ret;
7431
7432 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
7433 if (ret < 0)
7434 return 0;
7435 kvm_page_track_write(vcpu, gpa, val, bytes);
7436 return 1;
7437 }
7438
7439 struct read_write_emulator_ops {
7440 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
7441 int bytes);
7442 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
7443 void *val, int bytes);
7444 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
7445 int bytes, void *val);
7446 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
7447 void *val, int bytes);
7448 bool write;
7449 };
7450
7451 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
7452 {
7453 if (vcpu->mmio_read_completed) {
7454 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
7455 vcpu->mmio_fragments[0].gpa, val);
7456 vcpu->mmio_read_completed = 0;
7457 return 1;
7458 }
7459
7460 return 0;
7461 }
7462
7463 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
7464 void *val, int bytes)
7465 {
7466 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
7467 }
7468
7469 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
7470 void *val, int bytes)
7471 {
7472 return emulator_write_phys(vcpu, gpa, val, bytes);
7473 }
7474
7475 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
7476 {
7477 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
7478 return vcpu_mmio_write(vcpu, gpa, bytes, val);
7479 }
7480
7481 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
7482 void *val, int bytes)
7483 {
7484 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
7485 return X86EMUL_IO_NEEDED;
7486 }
7487
7488 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
7489 void *val, int bytes)
7490 {
7491 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
7492
7493 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
7494 return X86EMUL_CONTINUE;
7495 }
7496
7497 static const struct read_write_emulator_ops read_emultor = {
7498 .read_write_prepare = read_prepare,
7499 .read_write_emulate = read_emulate,
7500 .read_write_mmio = vcpu_mmio_read,
7501 .read_write_exit_mmio = read_exit_mmio,
7502 };
7503
7504 static const struct read_write_emulator_ops write_emultor = {
7505 .read_write_emulate = write_emulate,
7506 .read_write_mmio = write_mmio,
7507 .read_write_exit_mmio = write_exit_mmio,
7508 .write = true,
7509 };
7510
7511 static int emulator_read_write_onepage(unsigned long addr, void *val,
7512 unsigned int bytes,
7513 struct x86_exception *exception,
7514 struct kvm_vcpu *vcpu,
7515 const struct read_write_emulator_ops *ops)
7516 {
7517 gpa_t gpa;
7518 int handled, ret;
7519 bool write =
ops->write;
7520 struct kvm_mmio_fragment *frag;
7521 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
7522
7523 /*
7524 * If the exit was due to an NPF we may already have a GPA.
7525 * If the GPA is present, use it to avoid the GVA-to-GPA table walk.
7526 * Note, this cannot be used on string operations, since a string
7527 * operation using REP only carries the GPA of the initial access,
7528 * i.e. of the page where the NPF occurred.
7529 */
7530 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) &&
7531 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) {
7532 gpa = ctxt->gpa_val;
7533 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
7534 } else {
7535 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
7536 if (ret < 0)
7537 return X86EMUL_PROPAGATE_FAULT;
7538 }
7539
7540 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
7541 return X86EMUL_CONTINUE;
7542
7543 /*
7544 * Is this MMIO handled locally?
7545 */
7546 handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
7547 if (handled == bytes)
7548 return X86EMUL_CONTINUE;
7549
7550 gpa += handled;
7551 bytes -= handled;
7552 val += handled;
7553
7554 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
7555 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
7556 frag->gpa = gpa;
7557 frag->data = val;
7558 frag->len = bytes;
7559 return X86EMUL_CONTINUE;
7560 }
7561
7562 static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
7563 unsigned long addr,
7564 void *val, unsigned int bytes,
7565 struct x86_exception *exception,
7566 const struct read_write_emulator_ops *ops)
7567 {
7568 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7569 gpa_t gpa;
7570 int rc;
7571
7572 if (ops->read_write_prepare &&
7573 ops->read_write_prepare(vcpu, val, bytes))
7574 return X86EMUL_CONTINUE;
7575
7576 vcpu->mmio_nr_fragments = 0;
7577
7578 /* Crossing a page boundary?
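 Worked example: addr = 0x1ffc with bytes = 8 gives
 now = -addr & ~PAGE_MASK = 4, so the four bytes up to 0x1fff are
 emulated first and the second call below handles the remaining four
 bytes at 0x2000.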
*/ 7579 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 7580 int now; 7581 7582 now = -addr & ~PAGE_MASK; 7583 rc = emulator_read_write_onepage(addr, val, now, exception, 7584 vcpu, ops); 7585 7586 if (rc != X86EMUL_CONTINUE) 7587 return rc; 7588 addr += now; 7589 if (ctxt->mode != X86EMUL_MODE_PROT64) 7590 addr = (u32)addr; 7591 val += now; 7592 bytes -= now; 7593 } 7594 7595 rc = emulator_read_write_onepage(addr, val, bytes, exception, 7596 vcpu, ops); 7597 if (rc != X86EMUL_CONTINUE) 7598 return rc; 7599 7600 if (!vcpu->mmio_nr_fragments) 7601 return rc; 7602 7603 gpa = vcpu->mmio_fragments[0].gpa; 7604 7605 vcpu->mmio_needed = 1; 7606 vcpu->mmio_cur_fragment = 0; 7607 7608 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); 7609 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 7610 vcpu->run->exit_reason = KVM_EXIT_MMIO; 7611 vcpu->run->mmio.phys_addr = gpa; 7612 7613 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 7614 } 7615 7616 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 7617 unsigned long addr, 7618 void *val, 7619 unsigned int bytes, 7620 struct x86_exception *exception) 7621 { 7622 return emulator_read_write(ctxt, addr, val, bytes, 7623 exception, &read_emultor); 7624 } 7625 7626 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, 7627 unsigned long addr, 7628 const void *val, 7629 unsigned int bytes, 7630 struct x86_exception *exception) 7631 { 7632 return emulator_read_write(ctxt, addr, (void *)val, bytes, 7633 exception, &write_emultor); 7634 } 7635 7636 #define emulator_try_cmpxchg_user(t, ptr, old, new) \ 7637 (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t)) 7638 7639 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, 7640 unsigned long addr, 7641 const void *old, 7642 const void *new, 7643 unsigned int bytes, 7644 struct x86_exception *exception) 7645 { 7646 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7647 u64 page_line_mask; 7648 unsigned long hva; 7649 gpa_t gpa; 7650 int r; 7651 7652 /* guests cmpxchg8b have to be emulated atomically */ 7653 if (bytes > 8 || (bytes & (bytes - 1))) 7654 goto emul_write; 7655 7656 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); 7657 7658 if (gpa == INVALID_GPA || 7659 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7660 goto emul_write; 7661 7662 /* 7663 * Emulate the atomic as a straight write to avoid #AC if SLD is 7664 * enabled in the host and the access splits a cache line. 
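 Worked example: with 64-byte cache lines, page_line_mask is ~63ULL, so
 an 8-byte cmpxchg at a GPA whose low bits are 0x3c has
 ((gpa + 7) & page_line_mask) != (gpa & page_line_mask) and falls back
 to the plain-write emulation below rather than risking an #AC.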
7665 */ 7666 if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) 7667 page_line_mask = ~(cache_line_size() - 1); 7668 else 7669 page_line_mask = PAGE_MASK; 7670 7671 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) 7672 goto emul_write; 7673 7674 hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa)); 7675 if (kvm_is_error_hva(hva)) 7676 goto emul_write; 7677 7678 hva += offset_in_page(gpa); 7679 7680 switch (bytes) { 7681 case 1: 7682 r = emulator_try_cmpxchg_user(u8, hva, old, new); 7683 break; 7684 case 2: 7685 r = emulator_try_cmpxchg_user(u16, hva, old, new); 7686 break; 7687 case 4: 7688 r = emulator_try_cmpxchg_user(u32, hva, old, new); 7689 break; 7690 case 8: 7691 r = emulator_try_cmpxchg_user(u64, hva, old, new); 7692 break; 7693 default: 7694 BUG(); 7695 } 7696 7697 if (r < 0) 7698 return X86EMUL_UNHANDLEABLE; 7699 if (r) 7700 return X86EMUL_CMPXCHG_FAILED; 7701 7702 kvm_page_track_write(vcpu, gpa, new, bytes); 7703 7704 return X86EMUL_CONTINUE; 7705 7706 emul_write: 7707 printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); 7708 7709 return emulator_write_emulated(ctxt, addr, new, bytes, exception); 7710 } 7711 7712 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, 7713 unsigned short port, void *data, 7714 unsigned int count, bool in) 7715 { 7716 unsigned i; 7717 int r; 7718 7719 WARN_ON_ONCE(vcpu->arch.pio.count); 7720 for (i = 0; i < count; i++) { 7721 if (in) 7722 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data); 7723 else 7724 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data); 7725 7726 if (r) { 7727 if (i == 0) 7728 goto userspace_io; 7729 7730 /* 7731 * Userspace must have unregistered the device while PIO 7732 * was running. Drop writes / read as 0. 7733 */ 7734 if (in) 7735 memset(data, 0, size * (count - i)); 7736 break; 7737 } 7738 7739 data += size; 7740 } 7741 return 1; 7742 7743 userspace_io: 7744 vcpu->arch.pio.port = port; 7745 vcpu->arch.pio.in = in; 7746 vcpu->arch.pio.count = count; 7747 vcpu->arch.pio.size = size; 7748 7749 if (in) 7750 memset(vcpu->arch.pio_data, 0, size * count); 7751 else 7752 memcpy(vcpu->arch.pio_data, data, size * count); 7753 7754 vcpu->run->exit_reason = KVM_EXIT_IO; 7755 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 7756 vcpu->run->io.size = size; 7757 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 7758 vcpu->run->io.count = count; 7759 vcpu->run->io.port = port; 7760 return 0; 7761 } 7762 7763 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, 7764 unsigned short port, void *val, unsigned int count) 7765 { 7766 int r = emulator_pio_in_out(vcpu, size, port, val, count, true); 7767 if (r) 7768 trace_kvm_pio(KVM_PIO_IN, port, size, count, val); 7769 7770 return r; 7771 } 7772 7773 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val) 7774 { 7775 int size = vcpu->arch.pio.size; 7776 unsigned int count = vcpu->arch.pio.count; 7777 memcpy(val, vcpu->arch.pio_data, size * count); 7778 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); 7779 vcpu->arch.pio.count = 0; 7780 } 7781 7782 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, 7783 int size, unsigned short port, void *val, 7784 unsigned int count) 7785 { 7786 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7787 if (vcpu->arch.pio.count) { 7788 /* 7789 * Complete a previous iteration that required userspace I/O. 7790 * Note, @count isn't guaranteed to match pio.count as userspace 7791 * can modify ECX before rerunning the vCPU. 
Ignore any such 7792 * shenanigans as KVM doesn't support modifying the rep count, 7793 * and the emulator ensures @count doesn't overflow the buffer. 7794 */ 7795 complete_emulator_pio_in(vcpu, val); 7796 return 1; 7797 } 7798 7799 return emulator_pio_in(vcpu, size, port, val, count); 7800 } 7801 7802 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size, 7803 unsigned short port, const void *val, 7804 unsigned int count) 7805 { 7806 trace_kvm_pio(KVM_PIO_OUT, port, size, count, val); 7807 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); 7808 } 7809 7810 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, 7811 int size, unsigned short port, 7812 const void *val, unsigned int count) 7813 { 7814 return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count); 7815 } 7816 7817 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 7818 { 7819 return static_call(kvm_x86_get_segment_base)(vcpu, seg); 7820 } 7821 7822 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) 7823 { 7824 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); 7825 } 7826 7827 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) 7828 { 7829 if (!need_emulate_wbinvd(vcpu)) 7830 return X86EMUL_CONTINUE; 7831 7832 if (static_call(kvm_x86_has_wbinvd_exit)()) { 7833 int cpu = get_cpu(); 7834 7835 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 7836 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, 7837 wbinvd_ipi, NULL, 1); 7838 put_cpu(); 7839 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 7840 } else 7841 wbinvd(); 7842 return X86EMUL_CONTINUE; 7843 } 7844 7845 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) 7846 { 7847 kvm_emulate_wbinvd_noskip(vcpu); 7848 return kvm_skip_emulated_instruction(vcpu); 7849 } 7850 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); 7851 7852 7853 7854 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) 7855 { 7856 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); 7857 } 7858 7859 static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, 7860 unsigned long *dest) 7861 { 7862 kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); 7863 } 7864 7865 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, 7866 unsigned long value) 7867 { 7868 7869 return kvm_set_dr(emul_to_vcpu(ctxt), dr, value); 7870 } 7871 7872 static u64 mk_cr_64(u64 curr_cr, u32 new_val) 7873 { 7874 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; 7875 } 7876 7877 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) 7878 { 7879 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7880 unsigned long value; 7881 7882 switch (cr) { 7883 case 0: 7884 value = kvm_read_cr0(vcpu); 7885 break; 7886 case 2: 7887 value = vcpu->arch.cr2; 7888 break; 7889 case 3: 7890 value = kvm_read_cr3(vcpu); 7891 break; 7892 case 4: 7893 value = kvm_read_cr4(vcpu); 7894 break; 7895 case 8: 7896 value = kvm_get_cr8(vcpu); 7897 break; 7898 default: 7899 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7900 return 0; 7901 } 7902 7903 return value; 7904 } 7905 7906 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) 7907 { 7908 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7909 int res = 0; 7910 7911 switch (cr) { 7912 case 0: 7913 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); 7914 break; 7915 case 2: 7916 vcpu->arch.cr2 = val; 7917 break; 7918 case 3: 7919 res = kvm_set_cr3(vcpu, val); 7920 break; 7921 case 4: 7922 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); 7923 break; 7924 case 8: 7925 res = kvm_set_cr8(vcpu, 
val); 7926 break; 7927 default: 7928 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7929 res = -1; 7930 } 7931 7932 return res; 7933 } 7934 7935 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) 7936 { 7937 return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt)); 7938 } 7939 7940 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7941 { 7942 static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt); 7943 } 7944 7945 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7946 { 7947 static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt); 7948 } 7949 7950 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7951 { 7952 static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt); 7953 } 7954 7955 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7956 { 7957 static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt); 7958 } 7959 7960 static unsigned long emulator_get_cached_segment_base( 7961 struct x86_emulate_ctxt *ctxt, int seg) 7962 { 7963 return get_segment_base(emul_to_vcpu(ctxt), seg); 7964 } 7965 7966 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, 7967 struct desc_struct *desc, u32 *base3, 7968 int seg) 7969 { 7970 struct kvm_segment var; 7971 7972 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); 7973 *selector = var.selector; 7974 7975 if (var.unusable) { 7976 memset(desc, 0, sizeof(*desc)); 7977 if (base3) 7978 *base3 = 0; 7979 return false; 7980 } 7981 7982 if (var.g) 7983 var.limit >>= 12; 7984 set_desc_limit(desc, var.limit); 7985 set_desc_base(desc, (unsigned long)var.base); 7986 #ifdef CONFIG_X86_64 7987 if (base3) 7988 *base3 = var.base >> 32; 7989 #endif 7990 desc->type = var.type; 7991 desc->s = var.s; 7992 desc->dpl = var.dpl; 7993 desc->p = var.present; 7994 desc->avl = var.avl; 7995 desc->l = var.l; 7996 desc->d = var.db; 7997 desc->g = var.g; 7998 7999 return true; 8000 } 8001 8002 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, 8003 struct desc_struct *desc, u32 base3, 8004 int seg) 8005 { 8006 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8007 struct kvm_segment var; 8008 8009 var.selector = selector; 8010 var.base = get_desc_base(desc); 8011 #ifdef CONFIG_X86_64 8012 var.base |= ((u64)base3) << 32; 8013 #endif 8014 var.limit = get_desc_limit(desc); 8015 if (desc->g) 8016 var.limit = (var.limit << 12) | 0xfff; 8017 var.type = desc->type; 8018 var.dpl = desc->dpl; 8019 var.db = desc->d; 8020 var.s = desc->s; 8021 var.l = desc->l; 8022 var.g = desc->g; 8023 var.avl = desc->avl; 8024 var.present = desc->p; 8025 var.unusable = !var.present; 8026 var.padding = 0; 8027 8028 kvm_set_segment(vcpu, &var, seg); 8029 return; 8030 } 8031 8032 static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt, 8033 u32 msr_index, u64 *pdata) 8034 { 8035 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8036 int r; 8037 8038 r = kvm_get_msr_with_filter(vcpu, msr_index, pdata); 8039 if (r < 0) 8040 return X86EMUL_UNHANDLEABLE; 8041 8042 if (r) { 8043 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, 8044 complete_emulated_rdmsr, r)) 8045 return X86EMUL_IO_NEEDED; 8046 8047 trace_kvm_msr_read_ex(msr_index); 8048 return X86EMUL_PROPAGATE_FAULT; 8049 } 8050 8051 trace_kvm_msr_read(msr_index, *pdata); 8052 return X86EMUL_CONTINUE; 8053 } 8054 8055 static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt, 8056 u32 msr_index, u64 data) 8057 { 8058 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8059 int r; 
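	/*
	 * Like the read path above, kvm_set_msr_with_filter() returns a
	 * three-way result: a negative value is an internal KVM error,
	 * zero is success, and a positive value means the access should
	 * be completed elsewhere (deflected to userspace if possible,
	 * otherwise a #GP is injected into the guest).
	 */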
8060 8061 r = kvm_set_msr_with_filter(vcpu, msr_index, data); 8062 if (r < 0) 8063 return X86EMUL_UNHANDLEABLE; 8064 8065 if (r) { 8066 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data, 8067 complete_emulated_msr_access, r)) 8068 return X86EMUL_IO_NEEDED; 8069 8070 trace_kvm_msr_write_ex(msr_index, data); 8071 return X86EMUL_PROPAGATE_FAULT; 8072 } 8073 8074 trace_kvm_msr_write(msr_index, data); 8075 return X86EMUL_CONTINUE; 8076 } 8077 8078 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, 8079 u32 msr_index, u64 *pdata) 8080 { 8081 return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); 8082 } 8083 8084 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, 8085 u32 msr_index, u64 data) 8086 { 8087 return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data); 8088 } 8089 8090 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt) 8091 { 8092 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8093 8094 return vcpu->arch.smbase; 8095 } 8096 8097 static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase) 8098 { 8099 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8100 8101 vcpu->arch.smbase = smbase; 8102 } 8103 8104 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, 8105 u32 pmc) 8106 { 8107 if (kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc)) 8108 return 0; 8109 return -EINVAL; 8110 } 8111 8112 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, 8113 u32 pmc, u64 *pdata) 8114 { 8115 return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata); 8116 } 8117 8118 static void emulator_halt(struct x86_emulate_ctxt *ctxt) 8119 { 8120 emul_to_vcpu(ctxt)->arch.halt_request = 1; 8121 } 8122 8123 static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 8124 struct x86_instruction_info *info, 8125 enum x86_intercept_stage stage) 8126 { 8127 return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage, 8128 &ctxt->exception); 8129 } 8130 8131 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, 8132 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, 8133 bool exact_only) 8134 { 8135 return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only); 8136 } 8137 8138 static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt) 8139 { 8140 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM); 8141 } 8142 8143 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt) 8144 { 8145 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE); 8146 } 8147 8148 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) 8149 { 8150 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR); 8151 } 8152 8153 static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt) 8154 { 8155 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID); 8156 } 8157 8158 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) 8159 { 8160 return kvm_register_read_raw(emul_to_vcpu(ctxt), reg); 8161 } 8162 8163 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) 8164 { 8165 kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val); 8166 } 8167 8168 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) 8169 { 8170 static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked); 8171 } 8172 8173 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) 8174 { 8175 return emul_to_vcpu(ctxt)->arch.hflags; 8176 } 8177 8178 static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt) 8179 { 8180 struct kvm_vcpu *vcpu = 
emul_to_vcpu(ctxt); 8181 8182 kvm_smm_changed(vcpu, false); 8183 } 8184 8185 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt, 8186 const char *smstate) 8187 { 8188 return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate); 8189 } 8190 8191 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt) 8192 { 8193 kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt)); 8194 } 8195 8196 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr) 8197 { 8198 return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr); 8199 } 8200 8201 static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt) 8202 { 8203 struct kvm *kvm = emul_to_vcpu(ctxt)->kvm; 8204 8205 if (!kvm->vm_bugged) 8206 kvm_vm_bugged(kvm); 8207 } 8208 8209 static const struct x86_emulate_ops emulate_ops = { 8210 .vm_bugged = emulator_vm_bugged, 8211 .read_gpr = emulator_read_gpr, 8212 .write_gpr = emulator_write_gpr, 8213 .read_std = emulator_read_std, 8214 .write_std = emulator_write_std, 8215 .read_phys = kvm_read_guest_phys_system, 8216 .fetch = kvm_fetch_guest_virt, 8217 .read_emulated = emulator_read_emulated, 8218 .write_emulated = emulator_write_emulated, 8219 .cmpxchg_emulated = emulator_cmpxchg_emulated, 8220 .invlpg = emulator_invlpg, 8221 .pio_in_emulated = emulator_pio_in_emulated, 8222 .pio_out_emulated = emulator_pio_out_emulated, 8223 .get_segment = emulator_get_segment, 8224 .set_segment = emulator_set_segment, 8225 .get_cached_segment_base = emulator_get_cached_segment_base, 8226 .get_gdt = emulator_get_gdt, 8227 .get_idt = emulator_get_idt, 8228 .set_gdt = emulator_set_gdt, 8229 .set_idt = emulator_set_idt, 8230 .get_cr = emulator_get_cr, 8231 .set_cr = emulator_set_cr, 8232 .cpl = emulator_get_cpl, 8233 .get_dr = emulator_get_dr, 8234 .set_dr = emulator_set_dr, 8235 .get_smbase = emulator_get_smbase, 8236 .set_smbase = emulator_set_smbase, 8237 .set_msr_with_filter = emulator_set_msr_with_filter, 8238 .get_msr_with_filter = emulator_get_msr_with_filter, 8239 .set_msr = emulator_set_msr, 8240 .get_msr = emulator_get_msr, 8241 .check_pmc = emulator_check_pmc, 8242 .read_pmc = emulator_read_pmc, 8243 .halt = emulator_halt, 8244 .wbinvd = emulator_wbinvd, 8245 .fix_hypercall = emulator_fix_hypercall, 8246 .intercept = emulator_intercept, 8247 .get_cpuid = emulator_get_cpuid, 8248 .guest_has_long_mode = emulator_guest_has_long_mode, 8249 .guest_has_movbe = emulator_guest_has_movbe, 8250 .guest_has_fxsr = emulator_guest_has_fxsr, 8251 .guest_has_rdpid = emulator_guest_has_rdpid, 8252 .set_nmi_mask = emulator_set_nmi_mask, 8253 .get_hflags = emulator_get_hflags, 8254 .exiting_smm = emulator_exiting_smm, 8255 .leave_smm = emulator_leave_smm, 8256 .triple_fault = emulator_triple_fault, 8257 .set_xcr = emulator_set_xcr, 8258 }; 8259 8260 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) 8261 { 8262 u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 8263 /* 8264 * An STI; STI; sequence only disables interrupts for the first 8265 * instruction. So, if the last instruction, be it emulated or 8266 * not, left the system with the INT_STI flag enabled, it 8267 * means that the last instruction was an STI. We should not 8268 * leave the flag on in this case.
The same goes for mov ss 8269 */ 8270 if (int_shadow & mask) 8271 mask = 0; 8272 if (unlikely(int_shadow || mask)) { 8273 static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); 8274 if (!mask) 8275 kvm_make_request(KVM_REQ_EVENT, vcpu); 8276 } 8277 } 8278 8279 static void inject_emulated_exception(struct kvm_vcpu *vcpu) 8280 { 8281 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8282 8283 if (ctxt->exception.vector == PF_VECTOR) 8284 kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); 8285 else if (ctxt->exception.error_code_valid) 8286 kvm_queue_exception_e(vcpu, ctxt->exception.vector, 8287 ctxt->exception.error_code); 8288 else 8289 kvm_queue_exception(vcpu, ctxt->exception.vector); 8290 } 8291 8292 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu) 8293 { 8294 struct x86_emulate_ctxt *ctxt; 8295 8296 ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT); 8297 if (!ctxt) { 8298 pr_err("kvm: failed to allocate vcpu's emulator\n"); 8299 return NULL; 8300 } 8301 8302 ctxt->vcpu = vcpu; 8303 ctxt->ops = &emulate_ops; 8304 vcpu->arch.emulate_ctxt = ctxt; 8305 8306 return ctxt; 8307 } 8308 8309 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) 8310 { 8311 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8312 int cs_db, cs_l; 8313 8314 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); 8315 8316 ctxt->gpa_available = false; 8317 ctxt->eflags = kvm_get_rflags(vcpu); 8318 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; 8319 8320 ctxt->eip = kvm_rip_read(vcpu); 8321 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 8322 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : 8323 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : 8324 cs_db ? X86EMUL_MODE_PROT32 : 8325 X86EMUL_MODE_PROT16; 8326 BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); 8327 BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); 8328 BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); 8329 8330 ctxt->interruptibility = 0; 8331 ctxt->have_exception = false; 8332 ctxt->exception.vector = -1; 8333 ctxt->perm_ok = false; 8334 8335 init_decode_cache(ctxt); 8336 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 8337 } 8338 8339 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) 8340 { 8341 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8342 int ret; 8343 8344 init_emulate_ctxt(vcpu); 8345 8346 ctxt->op_bytes = 2; 8347 ctxt->ad_bytes = 2; 8348 ctxt->_eip = ctxt->eip + inc_eip; 8349 ret = emulate_int_real(ctxt, irq); 8350 8351 if (ret != X86EMUL_CONTINUE) { 8352 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 8353 } else { 8354 ctxt->eip = ctxt->_eip; 8355 kvm_rip_write(vcpu, ctxt->eip); 8356 kvm_set_rflags(vcpu, ctxt->eflags); 8357 } 8358 } 8359 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); 8360 8361 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, 8362 u8 ndata, u8 *insn_bytes, u8 insn_size) 8363 { 8364 struct kvm_run *run = vcpu->run; 8365 u64 info[5]; 8366 u8 info_start; 8367 8368 /* 8369 * Zero the whole array used to retrieve the exit info, as casting to 8370 * u32 for select entries will leave some chunks uninitialized. 
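	 * E.g. ->get_exit_info() below fills only the low 32 bits of
	 * info[0], info[3] and info[4] through the u32 casts, which would
	 * otherwise leave the upper halves of those entries stale.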
8371 */ 8372 memset(&info, 0, sizeof(info)); 8373 8374 static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1], 8375 &info[2], (u32 *)&info[3], 8376 (u32 *)&info[4]); 8377 8378 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 8379 run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION; 8380 8381 /* 8382 * There's currently space for 13 entries, but 5 are used for the exit 8383 * reason and info. Restrict to 4 to reduce the maintenance burden 8384 * when expanding kvm_run.emulation_failure in the future. 8385 */ 8386 if (WARN_ON_ONCE(ndata > 4)) 8387 ndata = 4; 8388 8389 /* Always include the flags as a 'data' entry. */ 8390 info_start = 1; 8391 run->emulation_failure.flags = 0; 8392 8393 if (insn_size) { 8394 BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) + 8395 sizeof(run->emulation_failure.insn_bytes) != 16)); 8396 info_start += 2; 8397 run->emulation_failure.flags |= 8398 KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES; 8399 run->emulation_failure.insn_size = insn_size; 8400 memset(run->emulation_failure.insn_bytes, 0x90, 8401 sizeof(run->emulation_failure.insn_bytes)); 8402 memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size); 8403 } 8404 8405 memcpy(&run->internal.data[info_start], info, sizeof(info)); 8406 memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data, 8407 ndata * sizeof(data[0])); 8408 8409 run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata; 8410 } 8411 8412 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu) 8413 { 8414 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8415 8416 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, 8417 ctxt->fetch.end - ctxt->fetch.data); 8418 } 8419 8420 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, 8421 u8 ndata) 8422 { 8423 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0); 8424 } 8425 EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit); 8426 8427 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) 8428 { 8429 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0); 8430 } 8431 EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit); 8432 8433 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) 8434 { 8435 struct kvm *kvm = vcpu->kvm; 8436 8437 ++vcpu->stat.insn_emulation_fail; 8438 trace_kvm_emulate_insn_failed(vcpu); 8439 8440 if (emulation_type & EMULTYPE_VMWARE_GP) { 8441 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 8442 return 1; 8443 } 8444 8445 if (kvm->arch.exit_on_emulation_error || 8446 (emulation_type & EMULTYPE_SKIP)) { 8447 prepare_emulation_ctxt_failure_exit(vcpu); 8448 return 0; 8449 } 8450 8451 kvm_queue_exception(vcpu, UD_VECTOR); 8452 8453 if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { 8454 prepare_emulation_ctxt_failure_exit(vcpu); 8455 return 0; 8456 } 8457 8458 return 1; 8459 } 8460 8461 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 8462 bool write_fault_to_shadow_pgtable, 8463 int emulation_type) 8464 { 8465 gpa_t gpa = cr2_or_gpa; 8466 kvm_pfn_t pfn; 8467 8468 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 8469 return false; 8470 8471 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 8472 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 8473 return false; 8474 8475 if (!vcpu->arch.mmu->root_role.direct) { 8476 /* 8477 * Write permission should be allowed since only 8478 * write access need to be emulated. 
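		 * The walk below is therefore done with a write access, so
		 * that the resulting GPA matches the faulting access being
		 * retried.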
*/ 8480 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 8481 8482 /* 8483 * If the mapping is invalid in the guest, let the CPU retry 8484 * it to generate the fault. 8485 */ 8486 if (gpa == INVALID_GPA) 8487 return true; 8488 } 8489 8490 /* 8491 * Do not retry an unhandleable instruction if it faults on 8492 * read-only host memory, otherwise it will go into an infinite loop: 8493 * retry instruction -> write #PF -> emulation fail -> retry 8494 * instruction -> ... 8495 */ 8496 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); 8497 8498 /* 8499 * If the instruction failed on an error pfn, it cannot be fixed; 8500 * report the error to userspace. 8501 */ 8502 if (is_error_noslot_pfn(pfn)) 8503 return false; 8504 8505 kvm_release_pfn_clean(pfn); 8506 8507 /* Instructions are well-emulated on a direct MMU. */ 8508 if (vcpu->arch.mmu->root_role.direct) { 8509 unsigned int indirect_shadow_pages; 8510 8511 write_lock(&vcpu->kvm->mmu_lock); 8512 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; 8513 write_unlock(&vcpu->kvm->mmu_lock); 8514 8515 if (indirect_shadow_pages) 8516 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8517 8518 return true; 8519 } 8520 8521 /* 8522 * If emulation was due to an access to a shadowed page table 8523 * and it failed, try to unshadow the page and re-enter the 8524 * guest to let the CPU execute the instruction. 8525 */ 8526 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8527 8528 /* 8529 * If the access faults on its own page table, it cannot 8530 * be fixed by unprotecting the shadow page, and it should 8531 * be reported to userspace. 8532 */ 8533 return !write_fault_to_shadow_pgtable; 8534 } 8535 8536 static bool retry_instruction(struct x86_emulate_ctxt *ctxt, 8537 gpa_t cr2_or_gpa, int emulation_type) 8538 { 8539 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8540 unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa; 8541 8542 last_retry_eip = vcpu->arch.last_retry_eip; 8543 last_retry_addr = vcpu->arch.last_retry_addr; 8544 8545 /* 8546 * If the emulation is caused by #PF and the faulting instruction is 8547 * not a page-table-writing instruction, the VM-EXIT was caused by the 8548 * shadow page being write-protected; we can zap the shadow page and 8549 * retry the instruction directly. 8550 * 8551 * Note: if the guest uses a non-page-table-modifying instruction 8552 * on the PDE that points to the instruction itself, then we will unmap 8553 * the instruction and loop forever. So, we cache the 8554 * last retried eip and the last fault address; if we see the same eip 8555 * and address again, we can break out of the potential infinite 8556 * loop.
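	 *
	 * Illustrative sequence of that guard (a sketch, not literal code):
	 *   1. write #PF at RIP=R on address A -> zap shadow page, retry
	 *   2. the same #PF at RIP=R on address A -> cached eip/addr match,
	 *      so give up on retrying and emulate the instruction instead.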
8557 */ 8558 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 8559 8560 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 8561 return false; 8562 8563 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 8564 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 8565 return false; 8566 8567 if (x86_page_table_writing_insn(ctxt)) 8568 return false; 8569 8570 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) 8571 return false; 8572 8573 vcpu->arch.last_retry_eip = ctxt->eip; 8574 vcpu->arch.last_retry_addr = cr2_or_gpa; 8575 8576 if (!vcpu->arch.mmu->root_role.direct) 8577 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 8578 8579 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8580 8581 return true; 8582 } 8583 8584 static int complete_emulated_mmio(struct kvm_vcpu *vcpu); 8585 static int complete_emulated_pio(struct kvm_vcpu *vcpu); 8586 8587 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm) 8588 { 8589 trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm); 8590 8591 if (entering_smm) { 8592 vcpu->arch.hflags |= HF_SMM_MASK; 8593 } else { 8594 vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK); 8595 8596 /* Process a latched INIT or SMI, if any. */ 8597 kvm_make_request(KVM_REQ_EVENT, vcpu); 8598 8599 /* 8600 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band, 8601 * on SMM exit we still need to reload them from 8602 * guest memory 8603 */ 8604 vcpu->arch.pdptrs_from_userspace = false; 8605 } 8606 8607 kvm_mmu_reset_context(vcpu); 8608 } 8609 8610 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, 8611 unsigned long *db) 8612 { 8613 u32 dr6 = 0; 8614 int i; 8615 u32 enable, rwlen; 8616 8617 enable = dr7; 8618 rwlen = dr7 >> 16; 8619 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) 8620 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) 8621 dr6 |= (1 << i); 8622 return dr6; 8623 } 8624 8625 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) 8626 { 8627 struct kvm_run *kvm_run = vcpu->run; 8628 8629 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 8630 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; 8631 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); 8632 kvm_run->debug.arch.exception = DB_VECTOR; 8633 kvm_run->exit_reason = KVM_EXIT_DEBUG; 8634 return 0; 8635 } 8636 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); 8637 return 1; 8638 } 8639 8640 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) 8641 { 8642 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 8643 int r; 8644 8645 r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); 8646 if (unlikely(!r)) 8647 return 0; 8648 8649 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 8650 8651 /* 8652 * rflags is the old, "raw" value of the flags. The new value has 8653 * not been saved yet. 8654 * 8655 * This is correct even for TF set by the guest, because "the 8656 * processor will not generate this exception after the instruction 8657 * that sets the TF flag". 8658 */ 8659 if (unlikely(rflags & X86_EFLAGS_TF)) 8660 r = kvm_vcpu_do_singlestep(vcpu); 8661 return r; 8662 } 8663 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); 8664 8665 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu) 8666 { 8667 u32 shadow; 8668 8669 if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF) 8670 return true; 8671 8672 /* 8673 * Intel CPUs inhibit code #DBs when MOV/POP SS blocking is active, 8674 * but AMD CPUs do not. MOV/POP SS blocking is rare, check that first 8675 * to avoid the relatively expensive CPUID lookup. 
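	 *
	 * (The RFLAGS.RF check above is architectural on both vendors: RF
	 * suppresses instruction-breakpoint #DBs for one instruction.)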
8676 */ 8677 shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 8678 return (shadow & KVM_X86_SHADOW_INT_MOV_SS) && 8679 guest_cpuid_is_intel(vcpu); 8680 } 8681 8682 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, 8683 int emulation_type, int *r) 8684 { 8685 WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE); 8686 8687 /* 8688 * Do not check for code breakpoints if hardware has already done the 8689 * checks, as inferred from the emulation type. On NO_DECODE and SKIP, 8690 * the instruction has passed all exception checks, and all intercepted 8691 * exceptions that trigger emulation have lower priority than code 8692 * breakpoints, i.e. the fact that the intercepted exception occurred 8693 * means any code breakpoints have already been serviced. 8694 * 8695 * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as 8696 * hardware has checked the RIP of the magic prefix, but not the RIP of 8697 * the instruction being emulated. The intent of forced emulation is 8698 * to behave as if KVM intercepted the instruction without an exception 8699 * and without a prefix. 8700 */ 8701 if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP | 8702 EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF)) 8703 return false; 8704 8705 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && 8706 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { 8707 struct kvm_run *kvm_run = vcpu->run; 8708 unsigned long eip = kvm_get_linear_rip(vcpu); 8709 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 8710 vcpu->arch.guest_debug_dr7, 8711 vcpu->arch.eff_db); 8712 8713 if (dr6 != 0) { 8714 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; 8715 kvm_run->debug.arch.pc = eip; 8716 kvm_run->debug.arch.exception = DB_VECTOR; 8717 kvm_run->exit_reason = KVM_EXIT_DEBUG; 8718 *r = 0; 8719 return true; 8720 } 8721 } 8722 8723 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && 8724 !kvm_is_code_breakpoint_inhibited(vcpu)) { 8725 unsigned long eip = kvm_get_linear_rip(vcpu); 8726 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 8727 vcpu->arch.dr7, 8728 vcpu->arch.db); 8729 8730 if (dr6 != 0) { 8731 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); 8732 *r = 1; 8733 return true; 8734 } 8735 } 8736 8737 return false; 8738 } 8739 8740 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) 8741 { 8742 switch (ctxt->opcode_len) { 8743 case 1: 8744 switch (ctxt->b) { 8745 case 0xe4: /* IN */ 8746 case 0xe5: 8747 case 0xec: 8748 case 0xed: 8749 case 0xe6: /* OUT */ 8750 case 0xe7: 8751 case 0xee: 8752 case 0xef: 8753 case 0x6c: /* INS */ 8754 case 0x6d: 8755 case 0x6e: /* OUTS */ 8756 case 0x6f: 8757 return true; 8758 } 8759 break; 8760 case 2: 8761 switch (ctxt->b) { 8762 case 0x33: /* RDPMC */ 8763 return true; 8764 } 8765 break; 8766 } 8767 8768 return false; 8769 } 8770 8771 /* 8772 * Decode an instruction for emulation. The caller is responsible for handling 8773 * code breakpoints. Note, manually detecting code breakpoints is unnecessary 8774 * (and wrong) when emulating on an intercepted fault-like exception[*], as 8775 * code breakpoints have higher priority and thus have already been done by 8776 * hardware. 8777 * 8778 * [*] Except #MC, which is higher priority, but KVM should never emulate in 8779 * response to a machine check. 
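 *
 * Typical call sequence, mirroring x86_emulate_instruction() below (sketch):
 *
 *	r = x86_decode_emulated_instruction(vcpu, emulation_type, insn, insn_len);
 *	if (r != EMULATION_OK)
 *		-> inject #UD, reexecute, or report an emulation failure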
8780 */ 8781 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, 8782 void *insn, int insn_len) 8783 { 8784 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8785 int r; 8786 8787 init_emulate_ctxt(vcpu); 8788 8789 r = x86_decode_insn(ctxt, insn, insn_len, emulation_type); 8790 8791 trace_kvm_emulate_insn_start(vcpu); 8792 ++vcpu->stat.insn_emulation; 8793 8794 return r; 8795 } 8796 EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction); 8797 8798 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 8799 int emulation_type, void *insn, int insn_len) 8800 { 8801 int r; 8802 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8803 bool writeback = true; 8804 bool write_fault_to_spt; 8805 8806 if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len))) 8807 return 1; 8808 8809 vcpu->arch.l1tf_flush_l1d = true; 8810 8811 /* 8812 * Clear write_fault_to_shadow_pgtable here to ensure it is 8813 * never reused. 8814 */ 8815 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; 8816 vcpu->arch.write_fault_to_shadow_pgtable = false; 8817 8818 if (!(emulation_type & EMULTYPE_NO_DECODE)) { 8819 kvm_clear_exception_queue(vcpu); 8820 8821 /* 8822 * Return immediately if RIP hits a code breakpoint, such #DBs 8823 * are fault-like and are higher priority than any faults on 8824 * the code fetch itself. 8825 */ 8826 if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r)) 8827 return r; 8828 8829 r = x86_decode_emulated_instruction(vcpu, emulation_type, 8830 insn, insn_len); 8831 if (r != EMULATION_OK) { 8832 if ((emulation_type & EMULTYPE_TRAP_UD) || 8833 (emulation_type & EMULTYPE_TRAP_UD_FORCED)) { 8834 kvm_queue_exception(vcpu, UD_VECTOR); 8835 return 1; 8836 } 8837 if (reexecute_instruction(vcpu, cr2_or_gpa, 8838 write_fault_to_spt, 8839 emulation_type)) 8840 return 1; 8841 if (ctxt->have_exception) { 8842 /* 8843 * #UD should result in just EMULATION_FAILED, and trap-like 8844 * exception should not be encountered during decode. 8845 */ 8846 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || 8847 exception_type(ctxt->exception.vector) == EXCPT_TRAP); 8848 inject_emulated_exception(vcpu); 8849 return 1; 8850 } 8851 return handle_emulation_failure(vcpu, emulation_type); 8852 } 8853 } 8854 8855 if ((emulation_type & EMULTYPE_VMWARE_GP) && 8856 !is_vmware_backdoor_opcode(ctxt)) { 8857 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 8858 return 1; 8859 } 8860 8861 /* 8862 * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for 8863 * use *only* by vendor callbacks for kvm_skip_emulated_instruction(). 8864 * The caller is responsible for updating interruptibility state and 8865 * injecting single-step #DBs. 
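	 * E.g. a vendor callback that cannot skip the instruction in
	 * hardware is expected to fall back to something like (sketch):
	 *
	 *	return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP);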
8866 */ 8867 if (emulation_type & EMULTYPE_SKIP) { 8868 if (ctxt->mode != X86EMUL_MODE_PROT64) 8869 ctxt->eip = (u32)ctxt->_eip; 8870 else 8871 ctxt->eip = ctxt->_eip; 8872 8873 if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) { 8874 r = 1; 8875 goto writeback; 8876 } 8877 8878 kvm_rip_write(vcpu, ctxt->eip); 8879 if (ctxt->eflags & X86_EFLAGS_RF) 8880 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); 8881 return 1; 8882 } 8883 8884 if (retry_instruction(ctxt, cr2_or_gpa, emulation_type)) 8885 return 1; 8886 8887 /* This is needed for the VMware backdoor interface to work since it 8888 * changes register values during the IO operation. */ 8889 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { 8890 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 8891 emulator_invalidate_register_cache(ctxt); 8892 } 8893 8894 restart: 8895 if (emulation_type & EMULTYPE_PF) { 8896 /* Save the faulting GPA (cr2) in the address field */ 8897 ctxt->exception.address = cr2_or_gpa; 8898 8899 /* With shadow page tables, cr2 contains a GVA or nGPA. */ 8900 if (vcpu->arch.mmu->root_role.direct) { 8901 ctxt->gpa_available = true; 8902 ctxt->gpa_val = cr2_or_gpa; 8903 } 8904 } else { 8905 /* Sanitize the address out of an abundance of paranoia. */ 8906 ctxt->exception.address = 0; 8907 } 8908 8909 r = x86_emulate_insn(ctxt); 8910 8911 if (r == EMULATION_INTERCEPTED) 8912 return 1; 8913 8914 if (r == EMULATION_FAILED) { 8915 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, 8916 emulation_type)) 8917 return 1; 8918 8919 return handle_emulation_failure(vcpu, emulation_type); 8920 } 8921 8922 if (ctxt->have_exception) { 8923 r = 1; 8924 inject_emulated_exception(vcpu); 8925 } else if (vcpu->arch.pio.count) { 8926 if (!vcpu->arch.pio.in) { 8927 /* FIXME: return into emulator if single-stepping. */ 8928 vcpu->arch.pio.count = 0; 8929 } else { 8930 writeback = false; 8931 vcpu->arch.complete_userspace_io = complete_emulated_pio; 8932 } 8933 r = 0; 8934 } else if (vcpu->mmio_needed) { 8935 ++vcpu->stat.mmio_exits; 8936 8937 if (!vcpu->mmio_is_write) 8938 writeback = false; 8939 r = 0; 8940 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 8941 } else if (vcpu->arch.complete_userspace_io) { 8942 writeback = false; 8943 r = 0; 8944 } else if (r == EMULATION_RESTART) 8945 goto restart; 8946 else 8947 r = 1; 8948 8949 writeback: 8950 if (writeback) { 8951 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 8952 toggle_interruptibility(vcpu, ctxt->interruptibility); 8953 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 8954 8955 /* 8956 * Note, EXCPT_DB is assumed to be fault-like as the emulator 8957 * only supports code breakpoints and general detect #DB, both 8958 * of which are fault-like. 8959 */ 8960 if (!ctxt->have_exception || 8961 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { 8962 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 8963 if (ctxt->is_branch) 8964 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 8965 kvm_rip_write(vcpu, ctxt->eip); 8966 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) 8967 r = kvm_vcpu_do_singlestep(vcpu); 8968 static_call_cond(kvm_x86_update_emulated_instruction)(vcpu); 8969 __kvm_set_rflags(vcpu, ctxt->eflags); 8970 } 8971 8972 /* 8973 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will 8974 * do nothing, and it will be requested again as soon as 8975 * the shadow expires. But we still need to check here, 8976 * because POPF has no interrupt shadow.
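	 * (POPF can set IF directly: after PUSHF; CLI; ...; POPF the guest
	 * is interruptible immediately, with no shadow window in between.)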
8977 */ 8978 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) 8979 kvm_make_request(KVM_REQ_EVENT, vcpu); 8980 } else 8981 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; 8982 8983 return r; 8984 } 8985 8986 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) 8987 { 8988 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); 8989 } 8990 EXPORT_SYMBOL_GPL(kvm_emulate_instruction); 8991 8992 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, 8993 void *insn, int insn_len) 8994 { 8995 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); 8996 } 8997 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); 8998 8999 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) 9000 { 9001 vcpu->arch.pio.count = 0; 9002 return 1; 9003 } 9004 9005 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) 9006 { 9007 vcpu->arch.pio.count = 0; 9008 9009 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) 9010 return 1; 9011 9012 return kvm_skip_emulated_instruction(vcpu); 9013 } 9014 9015 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, 9016 unsigned short port) 9017 { 9018 unsigned long val = kvm_rax_read(vcpu); 9019 int ret = emulator_pio_out(vcpu, size, port, &val, 1); 9020 9021 if (ret) 9022 return ret; 9023 9024 /* 9025 * Workaround userspace that relies on old KVM behavior of %rip being 9026 * incremented prior to exiting to userspace to handle "OUT 0x7e". 9027 */ 9028 if (port == 0x7e && 9029 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { 9030 vcpu->arch.complete_userspace_io = 9031 complete_fast_pio_out_port_0x7e; 9032 kvm_skip_emulated_instruction(vcpu); 9033 } else { 9034 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 9035 vcpu->arch.complete_userspace_io = complete_fast_pio_out; 9036 } 9037 return 0; 9038 } 9039 9040 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) 9041 { 9042 unsigned long val; 9043 9044 /* We should only ever be called with arch.pio.count equal to 1 */ 9045 BUG_ON(vcpu->arch.pio.count != 1); 9046 9047 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { 9048 vcpu->arch.pio.count = 0; 9049 return 1; 9050 } 9051 9052 /* For size less than 4 we merge, else we zero extend */ 9053 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; 9054 9055 complete_emulator_pio_in(vcpu, &val); 9056 kvm_rax_write(vcpu, val); 9057 9058 return kvm_skip_emulated_instruction(vcpu); 9059 } 9060 9061 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, 9062 unsigned short port) 9063 { 9064 unsigned long val; 9065 int ret; 9066 9067 /* For size less than 4 we merge, else we zero extend */ 9068 val = (size < 4) ? 
kvm_rax_read(vcpu) : 0; 9069 9070 ret = emulator_pio_in(vcpu, size, port, &val, 1); 9071 if (ret) { 9072 kvm_rax_write(vcpu, val); 9073 return ret; 9074 } 9075 9076 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 9077 vcpu->arch.complete_userspace_io = complete_fast_pio_in; 9078 9079 return 0; 9080 } 9081 9082 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) 9083 { 9084 int ret; 9085 9086 if (in) 9087 ret = kvm_fast_pio_in(vcpu, size, port); 9088 else 9089 ret = kvm_fast_pio_out(vcpu, size, port); 9090 return ret && kvm_skip_emulated_instruction(vcpu); 9091 } 9092 EXPORT_SYMBOL_GPL(kvm_fast_pio); 9093 9094 static int kvmclock_cpu_down_prep(unsigned int cpu) 9095 { 9096 __this_cpu_write(cpu_tsc_khz, 0); 9097 return 0; 9098 } 9099 9100 static void tsc_khz_changed(void *data) 9101 { 9102 struct cpufreq_freqs *freq = data; 9103 unsigned long khz = 0; 9104 9105 if (data) 9106 khz = freq->new; 9107 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 9108 khz = cpufreq_quick_get(raw_smp_processor_id()); 9109 if (!khz) 9110 khz = tsc_khz; 9111 __this_cpu_write(cpu_tsc_khz, khz); 9112 } 9113 9114 #ifdef CONFIG_X86_64 9115 static void kvm_hyperv_tsc_notifier(void) 9116 { 9117 struct kvm *kvm; 9118 int cpu; 9119 9120 mutex_lock(&kvm_lock); 9121 list_for_each_entry(kvm, &vm_list, vm_list) 9122 kvm_make_mclock_inprogress_request(kvm); 9123 9124 /* no guest entries from this point */ 9125 hyperv_stop_tsc_emulation(); 9126 9127 /* TSC frequency always matches when on Hyper-V */ 9128 for_each_present_cpu(cpu) 9129 per_cpu(cpu_tsc_khz, cpu) = tsc_khz; 9130 kvm_caps.max_guest_tsc_khz = tsc_khz; 9131 9132 list_for_each_entry(kvm, &vm_list, vm_list) { 9133 __kvm_start_pvclock_update(kvm); 9134 pvclock_update_vm_gtod_copy(kvm); 9135 kvm_end_pvclock_update(kvm); 9136 } 9137 9138 mutex_unlock(&kvm_lock); 9139 } 9140 #endif 9141 9142 static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu) 9143 { 9144 struct kvm *kvm; 9145 struct kvm_vcpu *vcpu; 9146 int send_ipi = 0; 9147 unsigned long i; 9148 9149 /* 9150 * We allow guests to temporarily run on slowing clocks, 9151 * provided we notify them after, or to run on accelerating 9152 * clocks, provided we notify them before. Thus time never 9153 * goes backwards. 9154 * 9155 * However, we have a problem. We can't atomically update 9156 * the frequency of a given CPU from this function; it is 9157 * merely a notifier, which can be called from any CPU. 9158 * Changing the TSC frequency at arbitrary points in time 9159 * requires a recomputation of local variables related to 9160 * the TSC for each VCPU. We must flag these local variables 9161 * to be updated and be sure the update takes place with the 9162 * new frequency before any guests proceed. 9163 * 9164 * Unfortunately, the combination of hotplug CPU and frequency 9165 * change creates an intractable locking scenario; the order 9166 * of when these callouts happen is undefined with respect to 9167 * CPU hotplug, and they can race with each other. As such, 9168 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is 9169 * undefined; you can actually have a CPU frequency change take 9170 * place in between the computation of X and the setting of the 9171 * variable. To protect against this problem, all updates of 9172 * the per_cpu tsc_khz variable are done in an interrupt 9173 * protected IPI, and all callers wishing to update the value 9174 * must wait for a synchronous IPI to complete (which is trivial 9175 * if the caller is on the CPU already). 
This establishes the 9176 * necessary total order on variable updates. 9177 * 9178 * Note that because a guest time update may take place 9179 * anytime after the setting of the VCPU's request bit, the 9180 * correct TSC value must be set before the request. However, 9181 * to ensure the update actually makes it to any guest which 9182 * starts running in hardware virtualization between the set 9183 * and the acquisition of the spinlock, we must also ping the 9184 * CPU after setting the request bit. 9185 * 9186 */ 9187 9188 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 9189 9190 mutex_lock(&kvm_lock); 9191 list_for_each_entry(kvm, &vm_list, vm_list) { 9192 kvm_for_each_vcpu(i, vcpu, kvm) { 9193 if (vcpu->cpu != cpu) 9194 continue; 9195 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 9196 if (vcpu->cpu != raw_smp_processor_id()) 9197 send_ipi = 1; 9198 } 9199 } 9200 mutex_unlock(&kvm_lock); 9201 9202 if (freq->old < freq->new && send_ipi) { 9203 /* 9204 * We upscale the frequency. We must make sure the guest 9205 * doesn't see old kvmclock values while running with 9206 * the new frequency, otherwise we risk the guest seeing 9207 * time go backwards. 9208 * 9209 * In case we update the frequency for another CPU 9210 * (which might be in guest context), send an interrupt 9211 * to kick the CPU out of guest context. Next time 9212 * guest context is entered, kvmclock will be updated, 9213 * so the guest will not see stale values. 9214 */ 9215 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 9216 } 9217 } 9218 9219 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 9220 void *data) 9221 { 9222 struct cpufreq_freqs *freq = data; 9223 int cpu; 9224 9225 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) 9226 return 0; 9227 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) 9228 return 0; 9229 9230 for_each_cpu(cpu, freq->policy->cpus) 9231 __kvmclock_cpufreq_notifier(freq, cpu); 9232 9233 return 0; 9234 } 9235 9236 static struct notifier_block kvmclock_cpufreq_notifier_block = { 9237 .notifier_call = kvmclock_cpufreq_notifier 9238 }; 9239 9240 static int kvmclock_cpu_online(unsigned int cpu) 9241 { 9242 tsc_khz_changed(NULL); 9243 return 0; 9244 } 9245 9246 static void kvm_timer_init(void) 9247 { 9248 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9249 max_tsc_khz = tsc_khz; 9250 9251 if (IS_ENABLED(CONFIG_CPU_FREQ)) { 9252 struct cpufreq_policy *policy; 9253 int cpu; 9254 9255 cpu = get_cpu(); 9256 policy = cpufreq_cpu_get(cpu); 9257 if (policy) { 9258 if (policy->cpuinfo.max_freq) 9259 max_tsc_khz = policy->cpuinfo.max_freq; 9260 cpufreq_cpu_put(policy); 9261 } 9262 put_cpu(); 9263 } 9264 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 9265 CPUFREQ_TRANSITION_NOTIFIER); 9266 } 9267 9268 cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online", 9269 kvmclock_cpu_online, kvmclock_cpu_down_prep); 9270 } 9271 9272 #ifdef CONFIG_X86_64 9273 static void pvclock_gtod_update_fn(struct work_struct *work) 9274 { 9275 struct kvm *kvm; 9276 struct kvm_vcpu *vcpu; 9277 unsigned long i; 9278 9279 mutex_lock(&kvm_lock); 9280 list_for_each_entry(kvm, &vm_list, vm_list) 9281 kvm_for_each_vcpu(i, vcpu, kvm) 9282 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 9283 atomic_set(&kvm_guest_has_master_clock, 0); 9284 mutex_unlock(&kvm_lock); 9285 } 9286 9287 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); 9288 9289 /* 9290 * Indirection to move queue_work() out of the tk_core.seq write held 9291 * region to prevent possible
deadlocks against time accessors which 9292 * are invoked with work related locks held. 9293 */ 9294 static void pvclock_irq_work_fn(struct irq_work *w) 9295 { 9296 queue_work(system_long_wq, &pvclock_gtod_work); 9297 } 9298 9299 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn); 9300 9301 /* 9302 * Notification about pvclock gtod data update. 9303 */ 9304 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, 9305 void *priv) 9306 { 9307 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 9308 struct timekeeper *tk = priv; 9309 9310 update_pvclock_gtod(tk); 9311 9312 /* 9313 * Disable master clock if host does not trust, or does not use, 9314 * TSC based clocksource. Delegate queue_work() to irq_work as 9315 * this is invoked with tk_core.seq write held. 9316 */ 9317 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && 9318 atomic_read(&kvm_guest_has_master_clock) != 0) 9319 irq_work_queue(&pvclock_irq_work); 9320 return 0; 9321 } 9322 9323 static struct notifier_block pvclock_gtod_notifier = { 9324 .notifier_call = pvclock_gtod_notify, 9325 }; 9326 #endif 9327 9328 int kvm_arch_init(void *opaque) 9329 { 9330 struct kvm_x86_init_ops *ops = opaque; 9331 u64 host_pat; 9332 int r; 9333 9334 if (kvm_x86_ops.hardware_enable) { 9335 pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name); 9336 return -EEXIST; 9337 } 9338 9339 if (!ops->cpu_has_kvm_support()) { 9340 pr_err_ratelimited("kvm: no hardware support for '%s'\n", 9341 ops->runtime_ops->name); 9342 return -EOPNOTSUPP; 9343 } 9344 if (ops->disabled_by_bios()) { 9345 pr_err_ratelimited("kvm: support for '%s' disabled by bios\n", 9346 ops->runtime_ops->name); 9347 return -EOPNOTSUPP; 9348 } 9349 9350 /* 9351 * KVM explicitly assumes that the guest has an FPU and 9352 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the 9353 * vCPU's FPU state as a fxregs_state struct. 9354 */ 9355 if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) { 9356 printk(KERN_ERR "kvm: inadequate fpu\n"); 9357 return -EOPNOTSUPP; 9358 } 9359 9360 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9361 pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n"); 9362 return -EOPNOTSUPP; 9363 } 9364 9365 /* 9366 * KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes 9367 * the PAT bits in SPTEs. Bail if PAT[0] is programmed to something 9368 * other than WB. Note, EPT doesn't utilize the PAT, but don't bother 9369 * with an exception. PAT[0] is set to WB on RESET and also by the 9370 * kernel, i.e. failure indicates a kernel bug or broken firmware. 
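	 * (WB is memtype encoding 6, hence the check below requires the
	 * low three bits of PAT entry 0 to read 0b110.)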
9371 */ 9372 if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) || 9373 (host_pat & GENMASK(2, 0)) != 6) { 9374 pr_err("kvm: host PAT[0] is not WB\n"); 9375 return -EIO; 9376 } 9377 9378 x86_emulator_cache = kvm_alloc_emulator_cache(); 9379 if (!x86_emulator_cache) { 9380 pr_err("kvm: failed to allocate cache for x86 emulator\n"); 9381 return -ENOMEM; 9382 } 9383 9384 user_return_msrs = alloc_percpu(struct kvm_user_return_msrs); 9385 if (!user_return_msrs) { 9386 printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n"); 9387 r = -ENOMEM; 9388 goto out_free_x86_emulator_cache; 9389 } 9390 kvm_nr_uret_msrs = 0; 9391 9392 r = kvm_mmu_vendor_module_init(); 9393 if (r) 9394 goto out_free_percpu; 9395 9396 kvm_timer_init(); 9397 9398 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 9399 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 9400 kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0; 9401 } 9402 9403 if (pi_inject_timer == -1) 9404 pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER); 9405 #ifdef CONFIG_X86_64 9406 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); 9407 9408 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9409 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier); 9410 #endif 9411 9412 return 0; 9413 9414 out_free_percpu: 9415 free_percpu(user_return_msrs); 9416 out_free_x86_emulator_cache: 9417 kmem_cache_destroy(x86_emulator_cache); 9418 return r; 9419 } 9420 9421 void kvm_arch_exit(void) 9422 { 9423 #ifdef CONFIG_X86_64 9424 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9425 clear_hv_tscchange_cb(); 9426 #endif 9427 kvm_lapic_exit(); 9428 9429 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 9430 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, 9431 CPUFREQ_TRANSITION_NOTIFIER); 9432 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); 9433 #ifdef CONFIG_X86_64 9434 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); 9435 irq_work_sync(&pvclock_irq_work); 9436 cancel_work_sync(&pvclock_gtod_work); 9437 #endif 9438 kvm_x86_ops.hardware_enable = NULL; 9439 kvm_mmu_vendor_module_exit(); 9440 free_percpu(user_return_msrs); 9441 kmem_cache_destroy(x86_emulator_cache); 9442 #ifdef CONFIG_KVM_XEN 9443 static_key_deferred_flush(&kvm_xen_enabled); 9444 WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); 9445 #endif 9446 } 9447 9448 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) 9449 { 9450 /* 9451 * The vCPU has halted, e.g. executed HLT. Update the run state if the 9452 * local APIC is in-kernel, the run loop will detect the non-runnable 9453 * state and halt the vCPU. Exit to userspace if the local APIC is 9454 * managed by userspace, in which case userspace is responsible for 9455 * handling wake events. 9456 */ 9457 ++vcpu->stat.halt_exits; 9458 if (lapic_in_kernel(vcpu)) { 9459 vcpu->arch.mp_state = state; 9460 return 1; 9461 } else { 9462 vcpu->run->exit_reason = reason; 9463 return 0; 9464 } 9465 } 9466 9467 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu) 9468 { 9469 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); 9470 } 9471 EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip); 9472 9473 int kvm_emulate_halt(struct kvm_vcpu *vcpu) 9474 { 9475 int ret = kvm_skip_emulated_instruction(vcpu); 9476 /* 9477 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered 9478 * KVM_EXIT_DEBUG here. 
9479 */ 9480 return kvm_emulate_halt_noskip(vcpu) && ret; 9481 } 9482 EXPORT_SYMBOL_GPL(kvm_emulate_halt); 9483 9484 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) 9485 { 9486 int ret = kvm_skip_emulated_instruction(vcpu); 9487 9488 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, 9489 KVM_EXIT_AP_RESET_HOLD) && ret; 9490 } 9491 EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold); 9492 9493 #ifdef CONFIG_X86_64 9494 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, 9495 unsigned long clock_type) 9496 { 9497 struct kvm_clock_pairing clock_pairing; 9498 struct timespec64 ts; 9499 u64 cycle; 9500 int ret; 9501 9502 if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK) 9503 return -KVM_EOPNOTSUPP; 9504 9505 /* 9506 * When tsc is in permanent catchup mode guests won't be able to use 9507 * pvclock_read_retry loop to get consistent view of pvclock 9508 */ 9509 if (vcpu->arch.tsc_always_catchup) 9510 return -KVM_EOPNOTSUPP; 9511 9512 if (!kvm_get_walltime_and_clockread(&ts, &cycle)) 9513 return -KVM_EOPNOTSUPP; 9514 9515 clock_pairing.sec = ts.tv_sec; 9516 clock_pairing.nsec = ts.tv_nsec; 9517 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); 9518 clock_pairing.flags = 0; 9519 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad)); 9520 9521 ret = 0; 9522 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, 9523 sizeof(struct kvm_clock_pairing))) 9524 ret = -KVM_EFAULT; 9525 9526 return ret; 9527 } 9528 #endif 9529 9530 /* 9531 * kvm_pv_kick_cpu_op: Kick a vcpu. 9532 * 9533 * @apicid - apicid of vcpu to be kicked. 9534 */ 9535 static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid) 9536 { 9537 /* 9538 * All other fields are unused for APIC_DM_REMRD, but may be consumed by 9539 * common code, e.g. for tracing. Defer initialization to the compiler. 
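	 * (The designated initializer below zero-fills every field that is
	 * not explicitly named, per standard C semantics.)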
9540 */ 9541 struct kvm_lapic_irq lapic_irq = { 9542 .delivery_mode = APIC_DM_REMRD, 9543 .dest_mode = APIC_DEST_PHYSICAL, 9544 .shorthand = APIC_DEST_NOSHORT, 9545 .dest_id = apicid, 9546 }; 9547 9548 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); 9549 } 9550 9551 bool kvm_apicv_activated(struct kvm *kvm) 9552 { 9553 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); 9554 } 9555 EXPORT_SYMBOL_GPL(kvm_apicv_activated); 9556 9557 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu) 9558 { 9559 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons); 9560 ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu); 9561 9562 return (vm_reasons | vcpu_reasons) == 0; 9563 } 9564 EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated); 9565 9566 static void set_or_clear_apicv_inhibit(unsigned long *inhibits, 9567 enum kvm_apicv_inhibit reason, bool set) 9568 { 9569 if (set) 9570 __set_bit(reason, inhibits); 9571 else 9572 __clear_bit(reason, inhibits); 9573 9574 trace_kvm_apicv_inhibit_changed(reason, set, *inhibits); 9575 } 9576 9577 static void kvm_apicv_init(struct kvm *kvm) 9578 { 9579 unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons; 9580 9581 init_rwsem(&kvm->arch.apicv_update_lock); 9582 9583 set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true); 9584 9585 if (!enable_apicv) 9586 set_or_clear_apicv_inhibit(inhibits, 9587 APICV_INHIBIT_REASON_DISABLE, true); 9588 } 9589 9590 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) 9591 { 9592 struct kvm_vcpu *target = NULL; 9593 struct kvm_apic_map *map; 9594 9595 vcpu->stat.directed_yield_attempted++; 9596 9597 if (single_task_running()) 9598 goto no_yield; 9599 9600 rcu_read_lock(); 9601 map = rcu_dereference(vcpu->kvm->arch.apic_map); 9602 9603 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) 9604 target = map->phys_map[dest_id]->vcpu; 9605 9606 rcu_read_unlock(); 9607 9608 if (!target || !READ_ONCE(target->ready)) 9609 goto no_yield; 9610 9611 /* Ignore requests to yield to self */ 9612 if (vcpu == target) 9613 goto no_yield; 9614 9615 if (kvm_vcpu_yield_to(target) <= 0) 9616 goto no_yield; 9617 9618 vcpu->stat.directed_yield_successful++; 9619 9620 no_yield: 9621 return; 9622 } 9623 9624 static int complete_hypercall_exit(struct kvm_vcpu *vcpu) 9625 { 9626 u64 ret = vcpu->run->hypercall.ret; 9627 9628 if (!is_64_bit_mode(vcpu)) 9629 ret = (u32)ret; 9630 kvm_rax_write(vcpu, ret); 9631 ++vcpu->stat.hypercalls; 9632 return kvm_skip_emulated_instruction(vcpu); 9633 } 9634 9635 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 9636 { 9637 unsigned long nr, a0, a1, a2, a3, ret; 9638 int op_64_bit; 9639 9640 if (kvm_xen_hypercall_enabled(vcpu->kvm)) 9641 return kvm_xen_hypercall(vcpu); 9642 9643 if (kvm_hv_hypercall_enabled(vcpu)) 9644 return kvm_hv_hypercall(vcpu); 9645 9646 nr = kvm_rax_read(vcpu); 9647 a0 = kvm_rbx_read(vcpu); 9648 a1 = kvm_rcx_read(vcpu); 9649 a2 = kvm_rdx_read(vcpu); 9650 a3 = kvm_rsi_read(vcpu); 9651 9652 trace_kvm_hypercall(nr, a0, a1, a2, a3); 9653 9654 op_64_bit = is_64_bit_hypercall(vcpu); 9655 if (!op_64_bit) { 9656 nr &= 0xFFFFFFFF; 9657 a0 &= 0xFFFFFFFF; 9658 a1 &= 0xFFFFFFFF; 9659 a2 &= 0xFFFFFFFF; 9660 a3 &= 0xFFFFFFFF; 9661 } 9662 9663 if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { 9664 ret = -KVM_EPERM; 9665 goto out; 9666 } 9667 9668 ret = -KVM_ENOSYS; 9669 9670 switch (nr) { 9671 case KVM_HC_VAPIC_POLL_IRQ: 9672 ret = 0; 9673 break; 9674 case KVM_HC_KICK_CPU: 9675 if (!guest_pv_has(vcpu, 
KVM_FEATURE_PV_UNHALT)) 9676 break; 9677 9678 kvm_pv_kick_cpu_op(vcpu->kvm, a1); 9679 kvm_sched_yield(vcpu, a1); 9680 ret = 0; 9681 break; 9682 #ifdef CONFIG_X86_64 9683 case KVM_HC_CLOCK_PAIRING: 9684 ret = kvm_pv_clock_pairing(vcpu, a0, a1); 9685 break; 9686 #endif 9687 case KVM_HC_SEND_IPI: 9688 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI)) 9689 break; 9690 9691 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); 9692 break; 9693 case KVM_HC_SCHED_YIELD: 9694 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) 9695 break; 9696 9697 kvm_sched_yield(vcpu, a0); 9698 ret = 0; 9699 break; 9700 case KVM_HC_MAP_GPA_RANGE: { 9701 u64 gpa = a0, npages = a1, attrs = a2; 9702 9703 ret = -KVM_ENOSYS; 9704 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) 9705 break; 9706 9707 if (!PAGE_ALIGNED(gpa) || !npages || 9708 gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) { 9709 ret = -KVM_EINVAL; 9710 break; 9711 } 9712 9713 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; 9714 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; 9715 vcpu->run->hypercall.args[0] = gpa; 9716 vcpu->run->hypercall.args[1] = npages; 9717 vcpu->run->hypercall.args[2] = attrs; 9718 vcpu->run->hypercall.longmode = op_64_bit; 9719 vcpu->arch.complete_userspace_io = complete_hypercall_exit; 9720 return 0; 9721 } 9722 default: 9723 ret = -KVM_ENOSYS; 9724 break; 9725 } 9726 out: 9727 if (!op_64_bit) 9728 ret = (u32)ret; 9729 kvm_rax_write(vcpu, ret); 9730 9731 ++vcpu->stat.hypercalls; 9732 return kvm_skip_emulated_instruction(vcpu); 9733 } 9734 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 9735 9736 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) 9737 { 9738 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 9739 char instruction[3]; 9740 unsigned long rip = kvm_rip_read(vcpu); 9741 9742 /* 9743 * If the quirk is disabled, synthesize a #UD and let the guest pick up 9744 * the pieces. 9745 */ 9746 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { 9747 ctxt->exception.error_code_valid = false; 9748 ctxt->exception.vector = UD_VECTOR; 9749 ctxt->have_exception = true; 9750 return X86EMUL_PROPAGATE_FAULT; 9751 } 9752 9753 static_call(kvm_x86_patch_hypercall)(vcpu, instruction); 9754 9755 return emulator_write_emulated(ctxt, rip, instruction, 3, 9756 &ctxt->exception); 9757 } 9758 9759 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 9760 { 9761 return vcpu->run->request_interrupt_window && 9762 likely(!pic_in_kernel(vcpu->kvm)); 9763 } 9764 9765 /* Called within kvm->srcu read side. 
*/ 9766 static void post_kvm_run_save(struct kvm_vcpu *vcpu) 9767 { 9768 struct kvm_run *kvm_run = vcpu->run; 9769 9770 kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu); 9771 kvm_run->cr8 = kvm_get_cr8(vcpu); 9772 kvm_run->apic_base = kvm_get_apic_base(vcpu); 9773 9774 kvm_run->ready_for_interrupt_injection = 9775 pic_in_kernel(vcpu->kvm) || 9776 kvm_vcpu_ready_for_interrupt_injection(vcpu); 9777 9778 if (is_smm(vcpu)) 9779 kvm_run->flags |= KVM_RUN_X86_SMM; 9780 } 9781 9782 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 9783 { 9784 int max_irr, tpr; 9785 9786 if (!kvm_x86_ops.update_cr8_intercept) 9787 return; 9788 9789 if (!lapic_in_kernel(vcpu)) 9790 return; 9791 9792 if (vcpu->arch.apic->apicv_active) 9793 return; 9794 9795 if (!vcpu->arch.apic->vapic_addr) 9796 max_irr = kvm_lapic_find_highest_irr(vcpu); 9797 else 9798 max_irr = -1; 9799 9800 if (max_irr != -1) 9801 max_irr >>= 4; 9802 9803 tpr = kvm_lapic_get_cr8(vcpu); 9804 9805 static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); 9806 } 9807 9808 9809 int kvm_check_nested_events(struct kvm_vcpu *vcpu) 9810 { 9811 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 9812 kvm_x86_ops.nested_ops->triple_fault(vcpu); 9813 return 1; 9814 } 9815 9816 return kvm_x86_ops.nested_ops->check_events(vcpu); 9817 } 9818 9819 static void kvm_inject_exception(struct kvm_vcpu *vcpu) 9820 { 9821 trace_kvm_inj_exception(vcpu->arch.exception.vector, 9822 vcpu->arch.exception.has_error_code, 9823 vcpu->arch.exception.error_code, 9824 vcpu->arch.exception.injected); 9825 9826 if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) 9827 vcpu->arch.exception.error_code = false; 9828 static_call(kvm_x86_inject_exception)(vcpu); 9829 } 9830 9831 /* 9832 * Check for any event (interrupt or exception) that is ready to be injected, 9833 * and if there is at least one event, inject the event with the highest 9834 * priority. This handles both "pending" events, i.e. events that have never 9835 * been injected into the guest, and "injected" events, i.e. events that were 9836 * injected as part of a previous VM-Enter, but weren't successfully delivered 9837 * and need to be re-injected. 9838 * 9839 * Note, this is not guaranteed to be invoked on a guest instruction boundary, 9840 * i.e. doesn't guarantee that there's an event window in the guest. KVM must 9841 * be able to inject exceptions in the "middle" of an instruction, and so must 9842 * also be able to re-inject NMIs and IRQs in the middle of an instruction. 9843 * I.e. for exceptions and re-injected events, NOT invoking this on instruction 9844 * boundaries is necessary and correct. 9845 * 9846 * For simplicity, KVM uses a single path to inject all events (except events 9847 * that are injected directly from L1 to L2) and doesn't explicitly track 9848 * instruction boundaries for asynchronous events. However, because VM-Exits 9849 * that can occur during instruction execution typically result in KVM skipping 9850 * the instruction or injecting an exception, e.g. instruction and exception 9851 * intercepts, and because pending exceptions have higher priority than pending 9852 * interrupts, KVM still honors instruction boundaries in most scenarios. 9853 * 9854 * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip 9855 * the instruction or inject an exception, then KVM can incorrectly inject a new 9856 * asynchronous event if the event became pending after the CPU fetched the 9857 * instruction (in the guest). E.g.
/*
 * Check for any event (interrupt or exception) that is ready to be injected,
 * and if there is at least one event, inject the event with the highest
 * priority.  This handles both "pending" events, i.e. events that have never
 * been injected into the guest, and "injected" events, i.e. events that were
 * injected as part of a previous VM-Enter, but weren't successfully delivered
 * and need to be re-injected.
 *
 * Note, this is not guaranteed to be invoked on a guest instruction boundary,
 * i.e. doesn't guarantee that there's an event window in the guest.  KVM must
 * be able to inject exceptions in the "middle" of an instruction, and so must
 * also be able to re-inject NMIs and IRQs in the middle of an instruction.
 * I.e. for exceptions and re-injected events, NOT invoking this on instruction
 * boundaries is necessary and correct.
 *
 * For simplicity, KVM uses a single path to inject all events (except events
 * that are injected directly from L1 to L2) and doesn't explicitly track
 * instruction boundaries for asynchronous events.  However, because VM-Exits
 * that can occur during instruction execution typically result in KVM skipping
 * the instruction or injecting an exception, e.g. instruction and exception
 * intercepts, and because pending exceptions have higher priority than pending
 * interrupts, KVM still honors instruction boundaries in most scenarios.
 *
 * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
 * the instruction or inject an exception, then KVM can incorrectly inject a
 * new asynchronous event if the event became pending after the CPU fetched the
 * instruction (in the guest).  E.g. if a page fault (#PF, #NPF, EPT violation)
 * occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be
 * injected on the restarted instruction instead of being deferred until the
 * instruction completes.
 *
 * In practice, this virtualization hole is unlikely to be observed by the
 * guest, and even less likely to cause functional problems.  To detect the
 * hole, the guest would have to trigger an event on a side effect of an early
 * phase of instruction execution, e.g. on the instruction fetch from memory.
 * And for it to be a functional problem, the guest would need to depend on the
 * ordering between that side effect, the instruction completing, _and_ the
 * delivery of the asynchronous event.
 */
static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
				       bool *req_immediate_exit)
{
	bool can_inject;
	int r;

	/*
	 * Process nested events first, as a nested VM-Exit supersedes event
	 * re-injection.  If there's an event queued for re-injection, it will
	 * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit.
	 */
	if (is_guest_mode(vcpu))
		r = kvm_check_nested_events(vcpu);
	else
		r = 0;

	/*
	 * Re-inject exceptions and events *especially* if immediate entry+exit
	 * to/from L2 is needed, as any event that has already been injected
	 * into L2 needs to complete its lifecycle before injecting a new event.
	 *
	 * Don't re-inject an NMI or interrupt if there is a pending exception.
	 * This collision arises if an exception occurred while vectoring the
	 * injected event, KVM intercepted said exception, and KVM ultimately
	 * determined the fault belongs to the guest and queues the exception
	 * for injection back into the guest.
	 *
	 * "Injected" interrupts can also collide with pending exceptions if
	 * userspace ignores the "ready for injection" flag and blindly queues
	 * an interrupt.  In that case, prioritizing the exception is correct,
	 * as the exception "occurred" before the exit to userspace.  Trap-like
	 * exceptions, e.g. most #DBs, have higher priority than interrupts.
	 * And while fault-like exceptions, e.g. #GP and #PF, are the lowest
	 * priority, they're only generated (pended) during instruction
	 * execution, and interrupts are recognized at instruction boundaries.
	 * Thus a pending fault-like exception means the fault occurred on the
	 * *previous* instruction and must be serviced prior to recognizing any
	 * new events in order to fully complete the previous instruction.
	 */
	if (vcpu->arch.exception.injected)
		kvm_inject_exception(vcpu);
	else if (kvm_is_exception_pending(vcpu))
		; /* see above */
	else if (vcpu->arch.nmi_injected)
		static_call(kvm_x86_inject_nmi)(vcpu);
	else if (vcpu->arch.interrupt.injected)
		static_call(kvm_x86_inject_irq)(vcpu, true);

	/*
	 * Exceptions that morph to VM-Exits are handled above, and pending
	 * exceptions on top of injected exceptions that do not VM-Exit should
	 * either morph to #DF or, sadly, override the injected exception.
	 */
	WARN_ON_ONCE(vcpu->arch.exception.injected &&
		     vcpu->arch.exception.pending);
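	/*
	 * Concrete illustration of the collision above (a sketch, not an
	 * exhaustive description of the recovery): KVM injects IRQ vector
	 * 0x20, the IDT walk for the IRQ faults, KVM intercepts the resulting
	 * #PF and determines the guest must handle it.  The IRQ is re-queued
	 * as "injected" (via IDT-vectoring info) while the #PF is "pending";
	 * per the priority rules above, the #PF is delivered first and the
	 * IRQ is re-injected on a later pass.
	 */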
	/*
	 * Bail if immediate entry+exit to/from the guest is needed to complete
	 * nested VM-Enter or event re-injection so that a different pending
	 * event can be serviced (or if KVM needs to exit to userspace).
	 *
	 * Otherwise, continue processing events even if VM-Exit occurred.  The
	 * VM-Exit will have cleared exceptions that were meant for L2, but
	 * there may now be events that can be injected into L1.
	 */
	if (r < 0)
		goto out;

	/*
	 * A pending exception VM-Exit should either result in nested VM-Exit
	 * or force an immediate re-entry and exit to/from L2, and exception
	 * VM-Exits cannot be injected (flag should _never_ be set).
	 */
	WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected ||
		     vcpu->arch.exception_vmexit.pending);

	/*
	 * New events, other than exceptions, cannot be injected if KVM needs
	 * to re-inject a previous event.  See above comments on re-injecting
	 * for why pending exceptions get priority.
	 */
	can_inject = !kvm_event_needs_reinjection(vcpu);

	if (vcpu->arch.exception.pending) {
		/*
		 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS
		 * value pushed on the stack.  Trap-like exceptions and all
		 * #DBs leave RF as-is (KVM follows Intel's behavior in this
		 * regard; AMD states that code breakpoint #DBs explicitly
		 * clear RF (RF=0)).
		 *
		 * Note, most versions of Intel's SDM and AMD's APM incorrectly
		 * describe the behavior of General Detect #DBs, which are
		 * fault-like.  They do _not_ set RF, a la code breakpoints.
		 */
		if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT)
			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
					       X86_EFLAGS_RF);

		if (vcpu->arch.exception.vector == DB_VECTOR) {
			kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception);
			if (vcpu->arch.dr7 & DR7_GD) {
				vcpu->arch.dr7 &= ~DR7_GD;
				kvm_update_dr7(vcpu);
			}
		}

		kvm_inject_exception(vcpu);

		vcpu->arch.exception.pending = false;
		vcpu->arch.exception.injected = true;

		can_inject = false;
	}

	/* Don't inject interrupts if the user asked to avoid doing so */
	if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ)
		return 0;

	/*
	 * Finally, inject interrupt events.  If an event cannot be injected
	 * due to architectural conditions (e.g. IF=0) a window-open exit
	 * will re-request KVM_REQ_EVENT.  Sometimes, however, an event is
	 * pending and can architecturally be injected, but we cannot do it
	 * right now: an interrupt could have arrived just now and we have to
	 * inject it as a vmexit, or there could already be an event in the
	 * queue, which is indicated by can_inject.  In that case we request
	 * an immediate exit in order to make progress and get back here for
	 * another iteration.  The kvm_x86_ops hooks communicate this by
	 * returning -EBUSY.
	 */
	if (vcpu->arch.smi_pending) {
		r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
		if (r < 0)
			goto out;
		if (r) {
			vcpu->arch.smi_pending = false;
			++vcpu->arch.smi_count;
			enter_smm(vcpu);
			can_inject = false;
		} else
			static_call(kvm_x86_enable_smi_window)(vcpu);
	}

	if (vcpu->arch.nmi_pending) {
		r = can_inject ?
static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; 10014 if (r < 0) 10015 goto out; 10016 if (r) { 10017 --vcpu->arch.nmi_pending; 10018 vcpu->arch.nmi_injected = true; 10019 static_call(kvm_x86_inject_nmi)(vcpu); 10020 can_inject = false; 10021 WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); 10022 } 10023 if (vcpu->arch.nmi_pending) 10024 static_call(kvm_x86_enable_nmi_window)(vcpu); 10025 } 10026 10027 if (kvm_cpu_has_injectable_intr(vcpu)) { 10028 r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; 10029 if (r < 0) 10030 goto out; 10031 if (r) { 10032 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); 10033 static_call(kvm_x86_inject_irq)(vcpu, false); 10034 WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); 10035 } 10036 if (kvm_cpu_has_injectable_intr(vcpu)) 10037 static_call(kvm_x86_enable_irq_window)(vcpu); 10038 } 10039 10040 if (is_guest_mode(vcpu) && 10041 kvm_x86_ops.nested_ops->has_events && 10042 kvm_x86_ops.nested_ops->has_events(vcpu)) 10043 *req_immediate_exit = true; 10044 10045 /* 10046 * KVM must never queue a new exception while injecting an event; KVM 10047 * is done emulating and should only propagate the to-be-injected event 10048 * to the VMCS/VMCB. Queueing a new exception can put the vCPU into an 10049 * infinite loop as KVM will bail from VM-Enter to inject the pending 10050 * exception and start the cycle all over. 10051 * 10052 * Exempt triple faults as they have special handling and won't put the 10053 * vCPU into an infinite loop. Triple fault can be queued when running 10054 * VMX without unrestricted guest, as that requires KVM to emulate Real 10055 * Mode events (see kvm_inject_realmode_interrupt()). 10056 */ 10057 WARN_ON_ONCE(vcpu->arch.exception.pending || 10058 vcpu->arch.exception_vmexit.pending); 10059 return 0; 10060 10061 out: 10062 if (r == -EBUSY) { 10063 *req_immediate_exit = true; 10064 r = 0; 10065 } 10066 return r; 10067 } 10068 10069 static void process_nmi(struct kvm_vcpu *vcpu) 10070 { 10071 unsigned limit = 2; 10072 10073 /* 10074 * x86 is limited to one NMI running, and one NMI pending after it. 10075 * If an NMI is already in progress, limit further NMIs to just one. 10076 * Otherwise, allow two (and we'll inject the first one immediately). 
10077 */ 10078 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) 10079 limit = 1; 10080 10081 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); 10082 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); 10083 kvm_make_request(KVM_REQ_EVENT, vcpu); 10084 } 10085 10086 static u32 enter_smm_get_segment_flags(struct kvm_segment *seg) 10087 { 10088 u32 flags = 0; 10089 flags |= seg->g << 23; 10090 flags |= seg->db << 22; 10091 flags |= seg->l << 21; 10092 flags |= seg->avl << 20; 10093 flags |= seg->present << 15; 10094 flags |= seg->dpl << 13; 10095 flags |= seg->s << 12; 10096 flags |= seg->type << 8; 10097 return flags; 10098 } 10099 10100 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n) 10101 { 10102 struct kvm_segment seg; 10103 int offset; 10104 10105 kvm_get_segment(vcpu, &seg, n); 10106 put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector); 10107 10108 if (n < 3) 10109 offset = 0x7f84 + n * 12; 10110 else 10111 offset = 0x7f2c + (n - 3) * 12; 10112 10113 put_smstate(u32, buf, offset + 8, seg.base); 10114 put_smstate(u32, buf, offset + 4, seg.limit); 10115 put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg)); 10116 } 10117 10118 #ifdef CONFIG_X86_64 10119 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n) 10120 { 10121 struct kvm_segment seg; 10122 int offset; 10123 u16 flags; 10124 10125 kvm_get_segment(vcpu, &seg, n); 10126 offset = 0x7e00 + n * 16; 10127 10128 flags = enter_smm_get_segment_flags(&seg) >> 8; 10129 put_smstate(u16, buf, offset, seg.selector); 10130 put_smstate(u16, buf, offset + 2, flags); 10131 put_smstate(u32, buf, offset + 4, seg.limit); 10132 put_smstate(u64, buf, offset + 8, seg.base); 10133 } 10134 #endif 10135 10136 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) 10137 { 10138 struct desc_ptr dt; 10139 struct kvm_segment seg; 10140 unsigned long val; 10141 int i; 10142 10143 put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu)); 10144 put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu)); 10145 put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu)); 10146 put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu)); 10147 10148 for (i = 0; i < 8; i++) 10149 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i)); 10150 10151 kvm_get_dr(vcpu, 6, &val); 10152 put_smstate(u32, buf, 0x7fcc, (u32)val); 10153 kvm_get_dr(vcpu, 7, &val); 10154 put_smstate(u32, buf, 0x7fc8, (u32)val); 10155 10156 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); 10157 put_smstate(u32, buf, 0x7fc4, seg.selector); 10158 put_smstate(u32, buf, 0x7f64, seg.base); 10159 put_smstate(u32, buf, 0x7f60, seg.limit); 10160 put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg)); 10161 10162 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); 10163 put_smstate(u32, buf, 0x7fc0, seg.selector); 10164 put_smstate(u32, buf, 0x7f80, seg.base); 10165 put_smstate(u32, buf, 0x7f7c, seg.limit); 10166 put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg)); 10167 10168 static_call(kvm_x86_get_gdt)(vcpu, &dt); 10169 put_smstate(u32, buf, 0x7f74, dt.address); 10170 put_smstate(u32, buf, 0x7f70, dt.size); 10171 10172 static_call(kvm_x86_get_idt)(vcpu, &dt); 10173 put_smstate(u32, buf, 0x7f58, dt.address); 10174 put_smstate(u32, buf, 0x7f54, dt.size); 10175 10176 for (i = 0; i < 6; i++) 10177 enter_smm_save_seg_32(vcpu, buf, i); 10178 10179 put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu)); 10180 10181 /* revision id */ 10182 put_smstate(u32, buf, 0x7efc, 0x00020000); 10183 put_smstate(u32, 
buf, 0x7ef8, vcpu->arch.smbase);
}

#ifdef CONFIG_X86_64
static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
{
	struct desc_ptr dt;
	struct kvm_segment seg;
	unsigned long val;
	int i;

	for (i = 0; i < 16; i++)
		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));

	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));

	kvm_get_dr(vcpu, 6, &val);
	put_smstate(u64, buf, 0x7f68, val);
	kvm_get_dr(vcpu, 7, &val);
	put_smstate(u64, buf, 0x7f60, val);

	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));

	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);

	/* revision id */
	put_smstate(u32, buf, 0x7efc, 0x00020064);

	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);

	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
	put_smstate(u16, buf, 0x7e90, seg.selector);
	put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
	put_smstate(u32, buf, 0x7e94, seg.limit);
	put_smstate(u64, buf, 0x7e98, seg.base);

	static_call(kvm_x86_get_idt)(vcpu, &dt);
	put_smstate(u32, buf, 0x7e84, dt.size);
	put_smstate(u64, buf, 0x7e88, dt.address);

	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
	put_smstate(u16, buf, 0x7e70, seg.selector);
	put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
	put_smstate(u32, buf, 0x7e74, seg.limit);
	put_smstate(u64, buf, 0x7e78, seg.base);

	static_call(kvm_x86_get_gdt)(vcpu, &dt);
	put_smstate(u32, buf, 0x7e64, dt.size);
	put_smstate(u64, buf, 0x7e68, dt.address);

	for (i = 0; i < 6; i++)
		enter_smm_save_seg_64(vcpu, buf, i);
}
#endif

static void enter_smm(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs, ds;
	struct desc_ptr dt;
	unsigned long cr0;
	char buf[512];

	memset(buf, 0, 512);
#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		enter_smm_save_state_64(vcpu, buf);
	else
#endif
		enter_smm_save_state_32(vcpu, buf);

	/*
	 * Give the vendor code a chance to make ISA-specific changes to the
	 * vCPU state (e.g. leave guest mode) after we've saved the state into
	 * the SMM state-save area.
	 */
	static_call(kvm_x86_enter_smm)(vcpu, buf);

	kvm_smm_changed(vcpu, true);
	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));

	if (static_call(kvm_x86_get_nmi_mask)(vcpu))
		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
	else
		static_call(kvm_x86_set_nmi_mask)(vcpu, true);

	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
	kvm_rip_write(vcpu, 0x8000);

	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
	static_call(kvm_x86_set_cr0)(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	static_call(kvm_x86_set_cr4)(vcpu, 0);
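	/*
	 * Worked example (illustrative): with the power-on default SMBASE of
	 * 0x30000, the 512-byte state-save area written above spans
	 * 0x3fe00-0x3ffff (SMBASE + 0xfe00), and the RIP of 0x8000 set above,
	 * combined with CS.base = SMBASE below, makes the guest's SMI handler
	 * entry point 0x38000 (SMBASE + 0x8000).
	 */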
	/* Undocumented: IDT limit is set to zero on entry to SMM. */
	dt.address = dt.size = 0;
	static_call(kvm_x86_set_idt)(vcpu, &dt);

	kvm_set_dr(vcpu, 7, DR7_FIXED_1);

	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
	cs.base = vcpu->arch.smbase;

	ds.selector = 0;
	ds.base = 0;

	cs.limit = ds.limit = 0xffffffff;
	cs.type = ds.type = 0x3;
	cs.dpl = ds.dpl = 0;
	cs.db = ds.db = 0;
	cs.s = ds.s = 1;
	cs.l = ds.l = 0;
	cs.g = ds.g = 1;
	cs.avl = ds.avl = 0;
	cs.present = ds.present = 1;
	cs.unusable = ds.unusable = 0;
	cs.padding = ds.padding = 0;

	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);

#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		static_call(kvm_x86_set_efer)(vcpu, 0);
#endif

	kvm_update_cpuid_runtime(vcpu);
	kvm_mmu_reset_context(vcpu);
}

static void process_smi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.smi_pending = true;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}

void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
				       unsigned long *vcpu_bitmap)
{
	kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap);
}

void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}

void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	bool activate;

	if (!lapic_in_kernel(vcpu))
		return;

	down_read(&vcpu->kvm->arch.apicv_update_lock);
	preempt_disable();

	/* Do not activate APICv when the APIC is disabled. */
	activate = kvm_vcpu_apicv_activated(vcpu) &&
		   (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED);

	if (apic->apicv_active == activate)
		goto out;

	apic->apicv_active = activate;
	kvm_apic_update_apicv(vcpu);
	static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);

	/*
	 * When APICv gets disabled, we may still have injected interrupts
	 * pending.  At the same time, KVM_REQ_EVENT may not be set as APICv
	 * was still active when the interrupt got accepted.  Make sure
	 * kvm_check_and_inject_events() is called to check for that.
	 */
	if (!apic->apicv_active)
		kvm_make_request(KVM_REQ_EVENT, vcpu);

out:
	preempt_enable();
	up_read(&vcpu->kvm->arch.apicv_update_lock);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);

void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				      enum kvm_apicv_inhibit reason, bool set)
{
	unsigned long old, new;

	lockdep_assert_held_write(&kvm->arch.apicv_update_lock);

	if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(reason))
		return;

	old = new = kvm->arch.apicv_inhibit_reasons;

	set_or_clear_apicv_inhibit(&new, reason, set);

	if (!!old != !!new) {
		/*
		 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid
		 * false positives in the sanity check WARN in svm_vcpu_run().
		 * This task will wait for all vCPUs to ack the kick IRQ before
		 * updating apicv_inhibit_reasons, and all other vCPUs will
		 * block on acquiring apicv_update_lock so that vCPUs can't
		 * redo svm_vcpu_run() without seeing the new inhibit state.
		 *
		 * Note, holding apicv_update_lock and taking it in the read
		 * side (handling the request) also prevents other vCPUs from
		 * servicing the request with a stale apicv_inhibit_reasons.
		 */
		kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
		kvm->arch.apicv_inhibit_reasons = new;
		if (new) {
			unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
			int idx = srcu_read_lock(&kvm->srcu);

			kvm_zap_gfn_range(kvm, gfn, gfn + 1);
			srcu_read_unlock(&kvm->srcu, idx);
		}
	} else {
		kvm->arch.apicv_inhibit_reasons = new;
	}
}

void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				    enum kvm_apicv_inhibit reason, bool set)
{
	if (!enable_apicv)
		return;

	down_write(&kvm->arch.apicv_update_lock);
	__kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
	up_write(&kvm->arch.apicv_update_lock);
}
EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);

static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
	if (!kvm_apic_present(vcpu))
		return;

	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);

	if (irqchip_split(vcpu->kvm)) {
		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
	} else {
		static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
		if (ioapic_in_kernel(vcpu->kvm))
			kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
	}

	if (is_guest_mode(vcpu))
		vcpu->arch.load_eoi_exitmap_pending = true;
	else
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
}

static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{
	u64 eoi_exit_bitmap[4];

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return;

	if (to_hv_vcpu(vcpu)) {
		bitmap_or((ulong *)eoi_exit_bitmap,
			  vcpu->arch.ioapic_handled_vectors,
			  to_hv_synic(vcpu)->vec_bitmap, 256);
		static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
		return;
	}

	static_call_cond(kvm_x86_load_eoi_exitmap)(
		vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
}
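/*
 * Illustrative example of the merge above (the vector numbers are arbitrary,
 * picked for illustration only): if an IOAPIC route targets vector 0x21 (so
 * bit 0x21 is set in ioapic_handled_vectors) and a Hyper-V SynIC SINT uses
 * vector 0x51 (bit 0x51 in vec_bitmap), both bits end up set in
 * eoi_exit_bitmap, and a guest EOI for either vector triggers an exit instead
 * of being virtualized entirely in hardware.
 */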
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
					    unsigned long start, unsigned long end)
{
	unsigned long apic_address;

	/*
	 * The physical address of the APIC access page is stored in the VMCS.
	 * Update it when it becomes invalid.
	 */
	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	if (start <= apic_address && apic_address < end)
		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
}

void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
	static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
}

static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
	if (!lapic_in_kernel(vcpu))
		return;

	static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu);
}

void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
{
	smp_send_reschedule(vcpu->cpu);
}
EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);

/*
 * Called within kvm->srcu read side.
 * Returns 1 to let vcpu_run() continue the guest execution loop without
 * exiting to userspace.  Otherwise, the value will be returned to userspace.
 */
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
	int r;
	bool req_int_win =
		dm_request_for_irq_injection(vcpu) &&
		kvm_cpu_accept_dm_intr(vcpu);
	fastpath_t exit_fastpath;

	bool req_immediate_exit = false;

	/* Forbid VM-Enter if the vCPU's dirty ring is soft-full. */
	if (unlikely(vcpu->kvm->dirty_ring_size &&
		     kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) {
		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
		trace_kvm_dirty_ring_exit(vcpu);
		r = 0;
		goto out;
	}

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
			r = -EIO;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
			if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
				r = 0;
				goto out;
			}
		}
		if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
			kvm_mmu_free_obsolete_roots(vcpu);
		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
			__kvm_migrate_timers(vcpu);
		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
			kvm_update_masterclock(vcpu->kvm);
		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
			kvm_gen_kvmclock_update(vcpu);
		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
			r = kvm_guest_time_update(vcpu);
			if (unlikely(r))
				goto out;
		}
		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
			kvm_mmu_sync_roots(vcpu);
		if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
			kvm_mmu_load_pgd(vcpu);
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
			kvm_vcpu_flush_tlb_all(vcpu);

			/* Flushing all ASIDs flushes the current ASID... */
			kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
		}
		kvm_service_local_tlb_flush_requests(vcpu);

		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
			if (is_guest_mode(vcpu)) {
				kvm_x86_ops.nested_ops->triple_fault(vcpu);
			} else {
				vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
				vcpu->mmio_needed = 0;
				r = 0;
				goto out;
			}
		}
		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
			/* Page is swapped out.
Do synthetic halt */ 10578 vcpu->arch.apf.halted = true; 10579 r = 1; 10580 goto out; 10581 } 10582 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) 10583 record_steal_time(vcpu); 10584 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 10585 process_smi(vcpu); 10586 if (kvm_check_request(KVM_REQ_NMI, vcpu)) 10587 process_nmi(vcpu); 10588 if (kvm_check_request(KVM_REQ_PMU, vcpu)) 10589 kvm_pmu_handle_event(vcpu); 10590 if (kvm_check_request(KVM_REQ_PMI, vcpu)) 10591 kvm_pmu_deliver_pmi(vcpu); 10592 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { 10593 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); 10594 if (test_bit(vcpu->arch.pending_ioapic_eoi, 10595 vcpu->arch.ioapic_handled_vectors)) { 10596 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; 10597 vcpu->run->eoi.vector = 10598 vcpu->arch.pending_ioapic_eoi; 10599 r = 0; 10600 goto out; 10601 } 10602 } 10603 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 10604 vcpu_scan_ioapic(vcpu); 10605 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) 10606 vcpu_load_eoi_exitmap(vcpu); 10607 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) 10608 kvm_vcpu_reload_apic_access_page(vcpu); 10609 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { 10610 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10611 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; 10612 vcpu->run->system_event.ndata = 0; 10613 r = 0; 10614 goto out; 10615 } 10616 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { 10617 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10618 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; 10619 vcpu->run->system_event.ndata = 0; 10620 r = 0; 10621 goto out; 10622 } 10623 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { 10624 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 10625 10626 vcpu->run->exit_reason = KVM_EXIT_HYPERV; 10627 vcpu->run->hyperv = hv_vcpu->exit; 10628 r = 0; 10629 goto out; 10630 } 10631 10632 /* 10633 * KVM_REQ_HV_STIMER has to be processed after 10634 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers 10635 * depend on the guest clock being up-to-date 10636 */ 10637 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) 10638 kvm_hv_process_stimers(vcpu); 10639 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) 10640 kvm_vcpu_update_apicv(vcpu); 10641 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) 10642 kvm_check_async_pf_completion(vcpu); 10643 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) 10644 static_call(kvm_x86_msr_filter_changed)(vcpu); 10645 10646 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) 10647 static_call(kvm_x86_update_cpu_dirty_logging)(vcpu); 10648 } 10649 10650 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || 10651 kvm_xen_has_interrupt(vcpu)) { 10652 ++vcpu->stat.req_event; 10653 r = kvm_apic_accept_events(vcpu); 10654 if (r < 0) { 10655 r = 0; 10656 goto out; 10657 } 10658 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 10659 r = 1; 10660 goto out; 10661 } 10662 10663 r = kvm_check_and_inject_events(vcpu, &req_immediate_exit); 10664 if (r < 0) { 10665 r = 0; 10666 goto out; 10667 } 10668 if (req_int_win) 10669 static_call(kvm_x86_enable_irq_window)(vcpu); 10670 10671 if (kvm_lapic_enabled(vcpu)) { 10672 update_cr8_intercept(vcpu); 10673 kvm_lapic_sync_to_vapic(vcpu); 10674 } 10675 } 10676 10677 r = kvm_mmu_reload(vcpu); 10678 if (unlikely(r)) { 10679 goto cancel_injection; 10680 } 10681 10682 preempt_disable(); 10683 10684 static_call(kvm_x86_prepare_switch_to_guest)(vcpu); 10685 10686 /* 10687 * Disable IRQs before setting IN_GUEST_MODE. 
Posted interrupt
	 * IPIs are then delayed until after guest entry, which ensures that
	 * they result in virtual interrupt delivery.
	 */
	local_irq_disable();

	/* Store vcpu->apicv_active before vcpu->mode. */
	smp_store_release(&vcpu->mode, IN_GUEST_MODE);

	kvm_vcpu_srcu_read_unlock(vcpu);

	/*
	 * 1) We should set ->mode before checking ->requests.  Please see
	 * the comment in kvm_vcpu_exiting_guest_mode().
	 *
	 * 2) For APICv, we should set ->mode before checking PID.ON.  This
	 * pairs with the memory barrier implicit in pi_test_and_set_on
	 * (see vmx_deliver_posted_interrupt).
	 *
	 * 3) This also orders the write to mode from any reads to the page
	 * tables done while the VCPU is running.  Please see the comment
	 * in kvm_flush_remote_tlbs.
	 */
	smp_mb__after_srcu_read_unlock();

	/*
	 * Process pending posted interrupts to handle the case where the
	 * notification IRQ arrived in the host, or was never sent (because the
	 * target vCPU wasn't running).  Do this regardless of the vCPU's APICv
	 * status, KVM doesn't update assigned devices when APICv is inhibited,
	 * i.e. they can post interrupts even if APICv is temporarily disabled.
	 */
	if (kvm_lapic_enabled(vcpu))
		static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);

	if (kvm_vcpu_exit_request(vcpu)) {
		vcpu->mode = OUTSIDE_GUEST_MODE;
		smp_wmb();
		local_irq_enable();
		preempt_enable();
		kvm_vcpu_srcu_read_lock(vcpu);
		r = 1;
		goto cancel_injection;
	}

	if (req_immediate_exit) {
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		static_call(kvm_x86_request_immediate_exit)(vcpu);
	}

	fpregs_assert_state_consistent();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_return();

	if (vcpu->arch.guest_fpu.xfd_err)
		wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);

	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
	} else if (unlikely(hw_breakpoint_active())) {
		set_debugreg(0, 7);
	}

	guest_timing_enter_irqoff();

	for (;;) {
		/*
		 * Assert that vCPU vs. VM APICv state is consistent.  An APICv
		 * update must kick and wait for all vCPUs before toggling the
		 * per-VM state, and responding vCPUs must wait for the update
		 * to complete before servicing KVM_REQ_APICV_UPDATE.
		 */
		WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
			     (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));

		exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu);
		if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
			break;

		if (kvm_lapic_enabled(vcpu))
			static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);

		if (unlikely(kvm_vcpu_exit_request(vcpu))) {
			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
			break;
		}
	}
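	/*
	 * Illustrative fastpath example: a guest write to
	 * MSR_IA32_TSC_DEADLINE that the irqoff fastpath handler fully
	 * services returns EXIT_FASTPATH_REENTER_GUEST, so the loop above
	 * re-enters the guest without ever reaching the full VM-Exit handling
	 * below.
	 */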
	/*
	 * Do this here before restoring debug registers on the host.  Since
	 * this runs before the VM-Exit is handled, a DR-access VM-Exit can
	 * (a) read the correct value of the debug registers and (b) set
	 * KVM_DEBUGREG_WONT_EXIT again.
	 */
	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
		static_call(kvm_x86_sync_dirty_debug_regs)(vcpu);
		kvm_update_dr0123(vcpu);
		kvm_update_dr7(vcpu);
	}

	/*
	 * If the guest has used debug registers, at least dr7 will be disabled
	 * while returning to the host.  If we don't have active breakpoints in
	 * the host, we don't care about the messed-up debug address registers.
	 * But if we have some of them active, restore the old state.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();

	vcpu->arch.last_vmentry_cpu = vcpu->cpu;
	vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());

	vcpu->mode = OUTSIDE_GUEST_MODE;
	smp_wmb();

	/*
	 * Sync xfd before calling handle_exit_irqoff() which may
	 * rely on the fact that guest_fpu::xfd is up-to-date (e.g.
	 * in #NM irqoff handler).
	 */
	if (vcpu->arch.xfd_no_write_intercept)
		fpu_sync_guest_vmexit_xfd_state();

	static_call(kvm_x86_handle_exit_irqoff)(vcpu);

	if (vcpu->arch.guest_fpu.xfd_err)
		wrmsrl(MSR_IA32_XFD_ERR, 0);

	/*
	 * Consume any pending interrupts, including the possible source of
	 * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
	 * An instruction is required after local_irq_enable() to fully unblock
	 * interrupts on processors that implement an interrupt shadow; the
	 * stat.exits increment will do nicely.
	 */
	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
	local_irq_enable();
	++vcpu->stat.exits;
	local_irq_disable();
	kvm_after_interrupt(vcpu);

	/*
	 * Wait until after servicing IRQs to account guest time so that any
	 * ticks that occurred while running the guest are properly accounted
	 * to the guest.  Waiting until IRQs are enabled degrades the accuracy
	 * of accounting via context tracking, but the loss of accuracy is
	 * acceptable for all known use cases.
	 */
	guest_timing_exit_irqoff();

	local_irq_enable();
	preempt_enable();

	kvm_vcpu_srcu_read_lock(vcpu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		unsigned long rip = kvm_rip_read(vcpu);
		profile_hit(KVM_PROFILING, (void *)rip);
	}

	if (unlikely(vcpu->arch.tsc_always_catchup))
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

	if (vcpu->arch.apic_attention)
		kvm_lapic_sync_from_vapic(vcpu);

	r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath);
	return r;

cancel_injection:
	if (req_immediate_exit)
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	static_call(kvm_x86_cancel_injection)(vcpu);
	if (unlikely(vcpu->arch.apic_attention))
		kvm_lapic_sync_from_vapic(vcpu);
out:
	return r;
}

/* Called within kvm->srcu read side. */
static inline int vcpu_block(struct kvm_vcpu *vcpu)
{
	bool hv_timer;

	if (!kvm_arch_vcpu_runnable(vcpu)) {
		/*
		 * Switch to the software timer before halt-polling/blocking as
		 * the guest's timer may be a break event for the vCPU, and the
		 * hypervisor timer runs only when the CPU is in guest mode.
		 * Switch before halt-polling so that KVM recognizes an expired
		 * timer before blocking.
10887 */ 10888 hv_timer = kvm_lapic_hv_timer_in_use(vcpu); 10889 if (hv_timer) 10890 kvm_lapic_switch_to_sw_timer(vcpu); 10891 10892 kvm_vcpu_srcu_read_unlock(vcpu); 10893 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 10894 kvm_vcpu_halt(vcpu); 10895 else 10896 kvm_vcpu_block(vcpu); 10897 kvm_vcpu_srcu_read_lock(vcpu); 10898 10899 if (hv_timer) 10900 kvm_lapic_switch_to_hv_timer(vcpu); 10901 10902 /* 10903 * If the vCPU is not runnable, a signal or another host event 10904 * of some kind is pending; service it without changing the 10905 * vCPU's activity state. 10906 */ 10907 if (!kvm_arch_vcpu_runnable(vcpu)) 10908 return 1; 10909 } 10910 10911 /* 10912 * Evaluate nested events before exiting the halted state. This allows 10913 * the halt state to be recorded properly in the VMCS12's activity 10914 * state field (AMD does not have a similar field and a VM-Exit always 10915 * causes a spurious wakeup from HLT). 10916 */ 10917 if (is_guest_mode(vcpu)) { 10918 if (kvm_check_nested_events(vcpu) < 0) 10919 return 0; 10920 } 10921 10922 if (kvm_apic_accept_events(vcpu) < 0) 10923 return 0; 10924 switch(vcpu->arch.mp_state) { 10925 case KVM_MP_STATE_HALTED: 10926 case KVM_MP_STATE_AP_RESET_HOLD: 10927 vcpu->arch.pv.pv_unhalted = false; 10928 vcpu->arch.mp_state = 10929 KVM_MP_STATE_RUNNABLE; 10930 fallthrough; 10931 case KVM_MP_STATE_RUNNABLE: 10932 vcpu->arch.apf.halted = false; 10933 break; 10934 case KVM_MP_STATE_INIT_RECEIVED: 10935 break; 10936 default: 10937 WARN_ON_ONCE(1); 10938 break; 10939 } 10940 return 1; 10941 } 10942 10943 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) 10944 { 10945 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 10946 !vcpu->arch.apf.halted); 10947 } 10948 10949 /* Called within kvm->srcu read side. */ 10950 static int vcpu_run(struct kvm_vcpu *vcpu) 10951 { 10952 int r; 10953 10954 vcpu->arch.l1tf_flush_l1d = true; 10955 10956 for (;;) { 10957 /* 10958 * If another guest vCPU requests a PV TLB flush in the middle 10959 * of instruction emulation, the rest of the emulation could 10960 * use a stale page translation. Assume that any code after 10961 * this point can start executing an instruction. 
		 */
		vcpu->arch.at_instruction_boundary = false;
		if (kvm_vcpu_running(vcpu)) {
			r = vcpu_enter_guest(vcpu);
		} else {
			r = vcpu_block(vcpu);
		}

		if (r <= 0)
			break;

		kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
		if (kvm_xen_has_pending_events(vcpu))
			kvm_xen_inject_pending_events(vcpu);

		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

		if (dm_request_for_irq_injection(vcpu) &&
			kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
			r = 0;
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
			++vcpu->stat.request_irq_exits;
			break;
		}

		if (__xfer_to_guest_mode_work_pending()) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			r = xfer_to_guest_mode_handle_work(vcpu);
			kvm_vcpu_srcu_read_lock(vcpu);
			if (r)
				return r;
		}
	}

	return r;
}

static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
}

static int complete_emulated_pio(struct kvm_vcpu *vcpu)
{
	BUG_ON(!vcpu->arch.pio.count);

	return complete_emulated_io(vcpu);
}

/*
 * Implements the following, as a state machine:
 *
 * read:
 *   for each fragment
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       exit
 *       copy data
 *   execute insn
 *
 * write:
 *   for each fragment
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       copy data
 *       exit
 */
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_mmio_fragment *frag;
	unsigned len;

	BUG_ON(!vcpu->mmio_needed);

	/* Complete previous fragment */
	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	len = min(8u, frag->len);
	if (!vcpu->mmio_is_write)
		memcpy(frag->data, run->mmio.data, len);

	if (frag->len <= 8) {
		/* Switch to the next fragment. */
		frag++;
		vcpu->mmio_cur_fragment++;
	} else {
		/* Go forward to the next mmio piece. */
		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
	}

	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
		vcpu->mmio_needed = 0;

		/* FIXME: return into emulator if single-stepping. */
		if (vcpu->mmio_is_write)
			return 1;
		vcpu->mmio_read_completed = 1;
		return complete_emulated_io(vcpu);
	}

	run->exit_reason = KVM_EXIT_MMIO;
	run->mmio.phys_addr = frag->gpa;
	if (vcpu->mmio_is_write)
		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
	run->mmio.len = min(8u, frag->len);
	run->mmio.is_write = vcpu->mmio_is_write;
	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	return 0;
}
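/*
 * Worked example of the state machine above (illustrative): a 16-byte
 * emulated read lands in one fragment with len = 16.  The first KVM_EXIT_MMIO
 * reports phys_addr = gpa, len = 8; on re-entry the 8 bytes from
 * run->mmio.data are copied and the fragment is advanced (gpa += 8,
 * len -> 8), then the second exit reports the remaining 8 bytes.  Once those
 * are copied, mmio_read_completed is set and the emulator re-executes the
 * instruction with the full 16 bytes available.
 */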
/* Swap (qemu) user FPU context for the guest FPU context. */
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	/* Exclude PKRU, it's restored separately immediately after VM-Exit. */
	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true);
	trace_kvm_fpu(1);
}

/* When vcpu_run ends, restore user space FPU context. */
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false);
	++vcpu->stat.fpu_reload;
	trace_kvm_fpu(0);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_queued_exception *ex = &vcpu->arch.exception;
	struct kvm_run *kvm_run = vcpu->run;
	int r;

	vcpu_load(vcpu);
	kvm_sigset_activate(vcpu);
	kvm_run->flags = 0;
	kvm_load_guest_fpu(vcpu);

	kvm_vcpu_srcu_read_lock(vcpu);
	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		if (kvm_run->immediate_exit) {
			r = -EINTR;
			goto out;
		}
		/*
		 * It should be impossible for the hypervisor timer to be in
		 * use before KVM has ever run the vCPU.
		 */
		WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));

		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_block(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);

		if (kvm_apic_accept_events(vcpu) < 0) {
			r = 0;
			goto out;
		}
		r = -EAGAIN;
		if (signal_pending(current)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
		}
		goto out;
	}

	if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
	    (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
		r = -EINVAL;
		goto out;
	}

	if (kvm_run->kvm_dirty_regs) {
		r = sync_regs(vcpu);
		if (r != 0)
			goto out;
	}

	/* re-sync apic's tpr */
	if (!lapic_in_kernel(vcpu)) {
		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
			r = -EINVAL;
			goto out;
		}
	}

	/*
	 * If userspace set a pending exception and L2 is active, convert it to
	 * a pending VM-Exit if L1 wants to intercept the exception.
	 */
	if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector,
							ex->error_code)) {
		kvm_queue_exception_vmexit(vcpu, ex->vector,
					   ex->has_error_code, ex->error_code,
					   ex->has_payload, ex->payload);
		ex->injected = false;
		ex->pending = false;
	}
	vcpu->arch.exception_from_userspace = false;

	if (unlikely(vcpu->arch.complete_userspace_io)) {
		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
		vcpu->arch.complete_userspace_io = NULL;
		r = cui(vcpu);
		if (r <= 0)
			goto out;
	} else {
		WARN_ON_ONCE(vcpu->arch.pio.count);
		WARN_ON_ONCE(vcpu->mmio_needed);
	}

	if (kvm_run->immediate_exit) {
		r = -EINTR;
		goto out;
	}

	r = static_call(kvm_x86_vcpu_pre_run)(vcpu);
	if (r <= 0)
		goto out;

	r = vcpu_run(vcpu);

out:
	kvm_put_guest_fpu(vcpu);
	if (kvm_run->kvm_valid_regs)
		store_regs(vcpu);
	post_kvm_run_save(vcpu);
	kvm_vcpu_srcu_read_unlock(vcpu);

	kvm_sigset_deactivate(vcpu);
	vcpu_put(vcpu);
	return r;
}

static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
		/*
		 * We are here if userspace calls get_regs() in the middle of
		 * instruction emulation.  Register state needs to be copied
		 * back from the emulation context to the vCPU.  Userspace
		 * shouldn't usually do that, but some badly designed PV
		 * devices (the VMware backdoor interface) need it to work.
		 */
		emulator_writeback_register_cache(vcpu->arch.emulate_ctxt);
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
	}
	regs->rax = kvm_rax_read(vcpu);
	regs->rbx = kvm_rbx_read(vcpu);
	regs->rcx = kvm_rcx_read(vcpu);
	regs->rdx = kvm_rdx_read(vcpu);
	regs->rsi = kvm_rsi_read(vcpu);
	regs->rdi = kvm_rdi_read(vcpu);
	regs->rsp = kvm_rsp_read(vcpu);
	regs->rbp = kvm_rbp_read(vcpu);
#ifdef CONFIG_X86_64
	regs->r8 = kvm_r8_read(vcpu);
	regs->r9 = kvm_r9_read(vcpu);
	regs->r10 = kvm_r10_read(vcpu);
	regs->r11 = kvm_r11_read(vcpu);
	regs->r12 = kvm_r12_read(vcpu);
	regs->r13 = kvm_r13_read(vcpu);
	regs->r14 = kvm_r14_read(vcpu);
	regs->r15 = kvm_r15_read(vcpu);
#endif

	regs->rip = kvm_rip_read(vcpu);
	regs->rflags = kvm_get_rflags(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	__get_regs(vcpu, regs);
	vcpu_put(vcpu);
	return 0;
}

static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;

	kvm_rax_write(vcpu, regs->rax);
	kvm_rbx_write(vcpu, regs->rbx);
	kvm_rcx_write(vcpu, regs->rcx);
	kvm_rdx_write(vcpu, regs->rdx);
	kvm_rsi_write(vcpu, regs->rsi);
	kvm_rdi_write(vcpu, regs->rdi);
	kvm_rsp_write(vcpu, regs->rsp);
	kvm_rbp_write(vcpu, regs->rbp);
#ifdef CONFIG_X86_64
	kvm_r8_write(vcpu, regs->r8);
	kvm_r9_write(vcpu, regs->r9);
	kvm_r10_write(vcpu, regs->r10);
	kvm_r11_write(vcpu, regs->r11);
	kvm_r12_write(vcpu, regs->r12);
	kvm_r13_write(vcpu, regs->r13);
	kvm_r14_write(vcpu, regs->r14);
	kvm_r15_write(vcpu, regs->r15);
#endif

	kvm_rip_write(vcpu, regs->rip);
	kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);

	vcpu->arch.exception.pending = false;
	vcpu->arch.exception_vmexit.pending = false;

	kvm_make_request(KVM_REQ_EVENT, vcpu);
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	__set_regs(vcpu, regs);
	vcpu_put(vcpu);
	return 0;
}

static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	struct desc_ptr dt;

	if (vcpu->arch.guest_state_protected)
		goto skip_protected_regs;

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	static_call(kvm_x86_get_idt)(vcpu, &dt);
	sregs->idt.limit = dt.size;
	sregs->idt.base = dt.address;
	static_call(kvm_x86_get_gdt)(vcpu, &dt);
	sregs->gdt.limit = dt.size;
	sregs->gdt.base = dt.address;

	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = kvm_read_cr3(vcpu);

skip_protected_regs:
sregs->cr0 = kvm_read_cr0(vcpu); 11314 sregs->cr4 = kvm_read_cr4(vcpu); 11315 sregs->cr8 = kvm_get_cr8(vcpu); 11316 sregs->efer = vcpu->arch.efer; 11317 sregs->apic_base = kvm_get_apic_base(vcpu); 11318 } 11319 11320 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11321 { 11322 __get_sregs_common(vcpu, sregs); 11323 11324 if (vcpu->arch.guest_state_protected) 11325 return; 11326 11327 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) 11328 set_bit(vcpu->arch.interrupt.nr, 11329 (unsigned long *)sregs->interrupt_bitmap); 11330 } 11331 11332 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 11333 { 11334 int i; 11335 11336 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2); 11337 11338 if (vcpu->arch.guest_state_protected) 11339 return; 11340 11341 if (is_pae_paging(vcpu)) { 11342 for (i = 0 ; i < 4 ; i++) 11343 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); 11344 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; 11345 } 11346 } 11347 11348 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 11349 struct kvm_sregs *sregs) 11350 { 11351 vcpu_load(vcpu); 11352 __get_sregs(vcpu, sregs); 11353 vcpu_put(vcpu); 11354 return 0; 11355 } 11356 11357 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 11358 struct kvm_mp_state *mp_state) 11359 { 11360 int r; 11361 11362 vcpu_load(vcpu); 11363 if (kvm_mpx_supported()) 11364 kvm_load_guest_fpu(vcpu); 11365 11366 r = kvm_apic_accept_events(vcpu); 11367 if (r < 0) 11368 goto out; 11369 r = 0; 11370 11371 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || 11372 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && 11373 vcpu->arch.pv.pv_unhalted) 11374 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; 11375 else 11376 mp_state->mp_state = vcpu->arch.mp_state; 11377 11378 out: 11379 if (kvm_mpx_supported()) 11380 kvm_put_guest_fpu(vcpu); 11381 vcpu_put(vcpu); 11382 return r; 11383 } 11384 11385 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 11386 struct kvm_mp_state *mp_state) 11387 { 11388 int ret = -EINVAL; 11389 11390 vcpu_load(vcpu); 11391 11392 switch (mp_state->mp_state) { 11393 case KVM_MP_STATE_UNINITIALIZED: 11394 case KVM_MP_STATE_HALTED: 11395 case KVM_MP_STATE_AP_RESET_HOLD: 11396 case KVM_MP_STATE_INIT_RECEIVED: 11397 case KVM_MP_STATE_SIPI_RECEIVED: 11398 if (!lapic_in_kernel(vcpu)) 11399 goto out; 11400 break; 11401 11402 case KVM_MP_STATE_RUNNABLE: 11403 break; 11404 11405 default: 11406 goto out; 11407 } 11408 11409 /* 11410 * Pending INITs are reported using KVM_SET_VCPU_EVENTS, disallow 11411 * forcing the guest into INIT/SIPI if those events are supposed to be 11412 * blocked. KVM prioritizes SMI over INIT, so reject INIT/SIPI state 11413 * if an SMI is pending as well. 
11414 */ 11415 if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) && 11416 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || 11417 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) 11418 goto out; 11419 11420 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 11421 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 11422 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); 11423 } else 11424 vcpu->arch.mp_state = mp_state->mp_state; 11425 kvm_make_request(KVM_REQ_EVENT, vcpu); 11426 11427 ret = 0; 11428 out: 11429 vcpu_put(vcpu); 11430 return ret; 11431 } 11432 11433 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, 11434 int reason, bool has_error_code, u32 error_code) 11435 { 11436 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 11437 int ret; 11438 11439 init_emulate_ctxt(vcpu); 11440 11441 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, 11442 has_error_code, error_code); 11443 if (ret) { 11444 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 11445 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 11446 vcpu->run->internal.ndata = 0; 11447 return 0; 11448 } 11449 11450 kvm_rip_write(vcpu, ctxt->eip); 11451 kvm_set_rflags(vcpu, ctxt->eflags); 11452 return 1; 11453 } 11454 EXPORT_SYMBOL_GPL(kvm_task_switch); 11455 11456 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11457 { 11458 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { 11459 /* 11460 * When EFER.LME and CR0.PG are set, the processor is in 11461 * 64-bit mode (though maybe in a 32-bit code segment). 11462 * CR4.PAE and EFER.LMA must be set. 11463 */ 11464 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) 11465 return false; 11466 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) 11467 return false; 11468 } else { 11469 /* 11470 * Not in 64-bit mode: EFER.LMA is clear and the code 11471 * segment cannot be 64-bit. 
11472 */ 11473 if (sregs->efer & EFER_LMA || sregs->cs.l) 11474 return false; 11475 } 11476 11477 return kvm_is_valid_cr4(vcpu, sregs->cr4); 11478 } 11479 11480 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, 11481 int *mmu_reset_needed, bool update_pdptrs) 11482 { 11483 struct msr_data apic_base_msr; 11484 int idx; 11485 struct desc_ptr dt; 11486 11487 if (!kvm_is_valid_sregs(vcpu, sregs)) 11488 return -EINVAL; 11489 11490 apic_base_msr.data = sregs->apic_base; 11491 apic_base_msr.host_initiated = true; 11492 if (kvm_set_apic_base(vcpu, &apic_base_msr)) 11493 return -EINVAL; 11494 11495 if (vcpu->arch.guest_state_protected) 11496 return 0; 11497 11498 dt.size = sregs->idt.limit; 11499 dt.address = sregs->idt.base; 11500 static_call(kvm_x86_set_idt)(vcpu, &dt); 11501 dt.size = sregs->gdt.limit; 11502 dt.address = sregs->gdt.base; 11503 static_call(kvm_x86_set_gdt)(vcpu, &dt); 11504 11505 vcpu->arch.cr2 = sregs->cr2; 11506 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; 11507 vcpu->arch.cr3 = sregs->cr3; 11508 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 11509 static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); 11510 11511 kvm_set_cr8(vcpu, sregs->cr8); 11512 11513 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 11514 static_call(kvm_x86_set_efer)(vcpu, sregs->efer); 11515 11516 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 11517 static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); 11518 vcpu->arch.cr0 = sregs->cr0; 11519 11520 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 11521 static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); 11522 11523 if (update_pdptrs) { 11524 idx = srcu_read_lock(&vcpu->kvm->srcu); 11525 if (is_pae_paging(vcpu)) { 11526 load_pdptrs(vcpu, kvm_read_cr3(vcpu)); 11527 *mmu_reset_needed = 1; 11528 } 11529 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11530 } 11531 11532 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 11533 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 11534 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); 11535 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 11536 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 11537 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 11538 11539 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 11540 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 11541 11542 update_cr8_intercept(vcpu); 11543 11544 /* Older userspace won't unhalt the vcpu on reset. 
*/ 11545 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && 11546 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 11547 !is_protmode(vcpu)) 11548 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 11549 11550 return 0; 11551 } 11552 11553 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11554 { 11555 int pending_vec, max_bits; 11556 int mmu_reset_needed = 0; 11557 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true); 11558 11559 if (ret) 11560 return ret; 11561 11562 if (mmu_reset_needed) 11563 kvm_mmu_reset_context(vcpu); 11564 11565 max_bits = KVM_NR_INTERRUPTS; 11566 pending_vec = find_first_bit( 11567 (const unsigned long *)sregs->interrupt_bitmap, max_bits); 11568 11569 if (pending_vec < max_bits) { 11570 kvm_queue_interrupt(vcpu, pending_vec, false); 11571 pr_debug("Set back pending irq %d\n", pending_vec); 11572 kvm_make_request(KVM_REQ_EVENT, vcpu); 11573 } 11574 return 0; 11575 } 11576 11577 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 11578 { 11579 int mmu_reset_needed = 0; 11580 bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; 11581 bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) && 11582 !(sregs2->efer & EFER_LMA); 11583 int i, ret; 11584 11585 if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID) 11586 return -EINVAL; 11587 11588 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) 11589 return -EINVAL; 11590 11591 ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2, 11592 &mmu_reset_needed, !valid_pdptrs); 11593 if (ret) 11594 return ret; 11595 11596 if (valid_pdptrs) { 11597 for (i = 0; i < 4 ; i++) 11598 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]); 11599 11600 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); 11601 mmu_reset_needed = 1; 11602 vcpu->arch.pdptrs_from_userspace = true; 11603 } 11604 if (mmu_reset_needed) 11605 kvm_mmu_reset_context(vcpu); 11606 return 0; 11607 } 11608 11609 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 11610 struct kvm_sregs *sregs) 11611 { 11612 int ret; 11613 11614 vcpu_load(vcpu); 11615 ret = __set_sregs(vcpu, sregs); 11616 vcpu_put(vcpu); 11617 return ret; 11618 } 11619 11620 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm) 11621 { 11622 bool set = false; 11623 struct kvm_vcpu *vcpu; 11624 unsigned long i; 11625 11626 if (!enable_apicv) 11627 return; 11628 11629 down_write(&kvm->arch.apicv_update_lock); 11630 11631 kvm_for_each_vcpu(i, vcpu, kvm) { 11632 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) { 11633 set = true; 11634 break; 11635 } 11636 } 11637 __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set); 11638 up_write(&kvm->arch.apicv_update_lock); 11639 } 11640 11641 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 11642 struct kvm_guest_debug *dbg) 11643 { 11644 unsigned long rflags; 11645 int i, r; 11646 11647 if (vcpu->arch.guest_state_protected) 11648 return -EINVAL; 11649 11650 vcpu_load(vcpu); 11651 11652 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { 11653 r = -EBUSY; 11654 if (kvm_is_exception_pending(vcpu)) 11655 goto out; 11656 if (dbg->control & KVM_GUESTDBG_INJECT_DB) 11657 kvm_queue_exception(vcpu, DB_VECTOR); 11658 else 11659 kvm_queue_exception(vcpu, BP_VECTOR); 11660 } 11661 11662 /* 11663 * Read rflags as long as potentially injected trace flags are still 11664 * filtered out. 
11665 */ 11666 rflags = kvm_get_rflags(vcpu); 11667 11668 vcpu->guest_debug = dbg->control; 11669 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) 11670 vcpu->guest_debug = 0; 11671 11672 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 11673 for (i = 0; i < KVM_NR_DB_REGS; ++i) 11674 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; 11675 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; 11676 } else { 11677 for (i = 0; i < KVM_NR_DB_REGS; i++) 11678 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 11679 } 11680 kvm_update_dr7(vcpu); 11681 11682 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 11683 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); 11684 11685 /* 11686 * Trigger an rflags update that will inject or remove the trace 11687 * flags. 11688 */ 11689 kvm_set_rflags(vcpu, rflags); 11690 11691 static_call(kvm_x86_update_exception_bitmap)(vcpu); 11692 11693 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm); 11694 11695 r = 0; 11696 11697 out: 11698 vcpu_put(vcpu); 11699 return r; 11700 } 11701 11702 /* 11703 * Translate a guest virtual address to a guest physical address. 11704 */ 11705 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 11706 struct kvm_translation *tr) 11707 { 11708 unsigned long vaddr = tr->linear_address; 11709 gpa_t gpa; 11710 int idx; 11711 11712 vcpu_load(vcpu); 11713 11714 idx = srcu_read_lock(&vcpu->kvm->srcu); 11715 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); 11716 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11717 tr->physical_address = gpa; 11718 tr->valid = gpa != INVALID_GPA; 11719 tr->writeable = 1; 11720 tr->usermode = 0; 11721 11722 vcpu_put(vcpu); 11723 return 0; 11724 } 11725 11726 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 11727 { 11728 struct fxregs_state *fxsave; 11729 11730 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 11731 return 0; 11732 11733 vcpu_load(vcpu); 11734 11735 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; 11736 memcpy(fpu->fpr, fxsave->st_space, 128); 11737 fpu->fcw = fxsave->cwd; 11738 fpu->fsw = fxsave->swd; 11739 fpu->ftwx = fxsave->twd; 11740 fpu->last_opcode = fxsave->fop; 11741 fpu->last_ip = fxsave->rip; 11742 fpu->last_dp = fxsave->rdp; 11743 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); 11744 11745 vcpu_put(vcpu); 11746 return 0; 11747 } 11748 11749 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 11750 { 11751 struct fxregs_state *fxsave; 11752 11753 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 11754 return 0; 11755 11756 vcpu_load(vcpu); 11757 11758 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; 11759 11760 memcpy(fxsave->st_space, fpu->fpr, 128); 11761 fxsave->cwd = fpu->fcw; 11762 fxsave->swd = fpu->fsw; 11763 fxsave->twd = fpu->ftwx; 11764 fxsave->fop = fpu->last_opcode; 11765 fxsave->rip = fpu->last_ip; 11766 fxsave->rdp = fpu->last_dp; 11767 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); 11768 11769 vcpu_put(vcpu); 11770 return 0; 11771 } 11772 11773 static void store_regs(struct kvm_vcpu *vcpu) 11774 { 11775 BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES); 11776 11777 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) 11778 __get_regs(vcpu, &vcpu->run->s.regs.regs); 11779 11780 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) 11781 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); 11782 11783 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) 11784 kvm_vcpu_ioctl_x86_get_vcpu_events( 11785 vcpu, &vcpu->run->s.regs.events); 11786 } 11787 11788 static int 
sync_regs(struct kvm_vcpu *vcpu) 11789 { 11790 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { 11791 __set_regs(vcpu, &vcpu->run->s.regs.regs); 11792 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; 11793 } 11794 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { 11795 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) 11796 return -EINVAL; 11797 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; 11798 } 11799 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { 11800 if (kvm_vcpu_ioctl_x86_set_vcpu_events( 11801 vcpu, &vcpu->run->s.regs.events)) 11802 return -EINVAL; 11803 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; 11804 } 11805 11806 return 0; 11807 } 11808 11809 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) 11810 { 11811 if (kvm_check_tsc_unstable() && kvm->created_vcpus) 11812 pr_warn_once("kvm: SMP vm created on host with unstable TSC; " 11813 "guest TSC will not be reliable\n"); 11814 11815 if (!kvm->arch.max_vcpu_ids) 11816 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS; 11817 11818 if (id >= kvm->arch.max_vcpu_ids) 11819 return -EINVAL; 11820 11821 return static_call(kvm_x86_vcpu_precreate)(kvm); 11822 } 11823 11824 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) 11825 { 11826 struct page *page; 11827 int r; 11828 11829 vcpu->arch.last_vmentry_cpu = -1; 11830 vcpu->arch.regs_avail = ~0; 11831 vcpu->arch.regs_dirty = ~0; 11832 11833 kvm_gpc_init(&vcpu->arch.pv_time); 11834 11835 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) 11836 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 11837 else 11838 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; 11839 11840 r = kvm_mmu_create(vcpu); 11841 if (r < 0) 11842 return r; 11843 11844 if (irqchip_in_kernel(vcpu->kvm)) { 11845 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); 11846 if (r < 0) 11847 goto fail_mmu_destroy; 11848 11849 /* 11850 * Defer evaluating inhibits until the vCPU is first run, as 11851 * this vCPU will not get notified of any changes until this 11852 * vCPU is visible to other vCPUs (marked online and added to 11853 * the set of vCPUs). Opportunistically mark APICv active as 11854 * VMX in particular is highly unlikely to have inhibits. 11855 * Ignore the current per-VM APICv state so that vCPU creation 11856 * is guaranteed to run with a deterministic value; the request 11857 * will ensure the vCPU gets the correct state before VM-Entry.
11858 */ 11859 if (enable_apicv) { 11860 vcpu->arch.apic->apicv_active = true; 11861 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); 11862 } 11863 } else 11864 static_branch_inc(&kvm_has_noapic_vcpu); 11865 11866 r = -ENOMEM; 11867 11868 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 11869 if (!page) 11870 goto fail_free_lapic; 11871 vcpu->arch.pio_data = page_address(page); 11872 11873 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64), 11874 GFP_KERNEL_ACCOUNT); 11875 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64), 11876 GFP_KERNEL_ACCOUNT); 11877 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks) 11878 goto fail_free_mce_banks; 11879 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; 11880 11881 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, 11882 GFP_KERNEL_ACCOUNT)) 11883 goto fail_free_mce_banks; 11884 11885 if (!alloc_emulate_ctxt(vcpu)) 11886 goto free_wbinvd_dirty_mask; 11887 11888 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { 11889 pr_err("kvm: failed to allocate vcpu's fpu\n"); 11890 goto free_emulate_ctxt; 11891 } 11892 11893 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); 11894 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); 11895 11896 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; 11897 11898 kvm_async_pf_hash_reset(vcpu); 11899 11900 vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap; 11901 kvm_pmu_init(vcpu); 11902 11903 vcpu->arch.pending_external_vector = -1; 11904 vcpu->arch.preempted_in_kernel = false; 11905 11906 #if IS_ENABLED(CONFIG_HYPERV) 11907 vcpu->arch.hv_root_tdp = INVALID_PAGE; 11908 #endif 11909 11910 r = static_call(kvm_x86_vcpu_create)(vcpu); 11911 if (r) 11912 goto free_guest_fpu; 11913 11914 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); 11915 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; 11916 kvm_xen_init_vcpu(vcpu); 11917 kvm_vcpu_mtrr_init(vcpu); 11918 vcpu_load(vcpu); 11919 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz); 11920 kvm_vcpu_reset(vcpu, false); 11921 kvm_init_mmu(vcpu); 11922 vcpu_put(vcpu); 11923 return 0; 11924 11925 free_guest_fpu: 11926 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); 11927 free_emulate_ctxt: 11928 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 11929 free_wbinvd_dirty_mask: 11930 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 11931 fail_free_mce_banks: 11932 kfree(vcpu->arch.mce_banks); 11933 kfree(vcpu->arch.mci_ctl2_banks); 11934 free_page((unsigned long)vcpu->arch.pio_data); 11935 fail_free_lapic: 11936 kvm_free_lapic(vcpu); 11937 fail_mmu_destroy: 11938 kvm_mmu_destroy(vcpu); 11939 return r; 11940 } 11941 11942 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 11943 { 11944 struct kvm *kvm = vcpu->kvm; 11945 11946 if (mutex_lock_killable(&vcpu->mutex)) 11947 return; 11948 vcpu_load(vcpu); 11949 kvm_synchronize_tsc(vcpu, 0); 11950 vcpu_put(vcpu); 11951 11952 /* poll control enabled by default */ 11953 vcpu->arch.msr_kvm_poll_control = 1; 11954 11955 mutex_unlock(&vcpu->mutex); 11956 11957 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) 11958 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 11959 KVMCLOCK_SYNC_PERIOD); 11960 } 11961 11962 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 11963 { 11964 int idx; 11965 11966 kvmclock_reset(vcpu); 11967 11968 static_call(kvm_x86_vcpu_free)(vcpu); 11969 11970 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 11971 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 11972 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); 11973 
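	/*
	 * Free the remaining state roughly in the reverse order in which it
	 * was allocated by kvm_arch_vcpu_create().
	 */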
11974 kvm_xen_destroy_vcpu(vcpu); 11975 kvm_hv_vcpu_uninit(vcpu); 11976 kvm_pmu_destroy(vcpu); 11977 kfree(vcpu->arch.mce_banks); 11978 kfree(vcpu->arch.mci_ctl2_banks); 11979 kvm_free_lapic(vcpu); 11980 idx = srcu_read_lock(&vcpu->kvm->srcu); 11981 kvm_mmu_destroy(vcpu); 11982 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11983 free_page((unsigned long)vcpu->arch.pio_data); 11984 kvfree(vcpu->arch.cpuid_entries); 11985 if (!lapic_in_kernel(vcpu)) 11986 static_branch_dec(&kvm_has_noapic_vcpu); 11987 } 11988 11989 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 11990 { 11991 struct kvm_cpuid_entry2 *cpuid_0x1; 11992 unsigned long old_cr0 = kvm_read_cr0(vcpu); 11993 unsigned long new_cr0; 11994 11995 /* 11996 * Several of the "set" flows, e.g. ->set_cr0(), read other registers 11997 * to handle side effects. RESET emulation hits those flows and relies 11998 * on emulated/virtualized registers, including those that are loaded 11999 * into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel 12000 * to detect improper or missing initialization. 12001 */ 12002 WARN_ON_ONCE(!init_event && 12003 (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu))); 12004 12005 kvm_lapic_reset(vcpu, init_event); 12006 12007 vcpu->arch.hflags = 0; 12008 12009 vcpu->arch.smi_pending = 0; 12010 vcpu->arch.smi_count = 0; 12011 atomic_set(&vcpu->arch.nmi_queued, 0); 12012 vcpu->arch.nmi_pending = 0; 12013 vcpu->arch.nmi_injected = false; 12014 kvm_clear_interrupt_queue(vcpu); 12015 kvm_clear_exception_queue(vcpu); 12016 12017 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); 12018 kvm_update_dr0123(vcpu); 12019 vcpu->arch.dr6 = DR6_ACTIVE_LOW; 12020 vcpu->arch.dr7 = DR7_FIXED_1; 12021 kvm_update_dr7(vcpu); 12022 12023 vcpu->arch.cr2 = 0; 12024 12025 kvm_make_request(KVM_REQ_EVENT, vcpu); 12026 vcpu->arch.apf.msr_en_val = 0; 12027 vcpu->arch.apf.msr_int_val = 0; 12028 vcpu->arch.st.msr_val = 0; 12029 12030 kvmclock_reset(vcpu); 12031 12032 kvm_clear_async_pf_completion_queue(vcpu); 12033 kvm_async_pf_hash_reset(vcpu); 12034 vcpu->arch.apf.halted = false; 12035 12036 if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) { 12037 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate; 12038 12039 /* 12040 * All paths that lead to INIT are required to load the guest's 12041 * FPU state (because most paths are buried in KVM_RUN). 12042 */ 12043 if (init_event) 12044 kvm_put_guest_fpu(vcpu); 12045 12046 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS); 12047 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR); 12048 12049 if (init_event) 12050 kvm_load_guest_fpu(vcpu); 12051 } 12052 12053 if (!init_event) { 12054 kvm_pmu_reset(vcpu); 12055 vcpu->arch.smbase = 0x30000; 12056 12057 vcpu->arch.msr_misc_features_enables = 0; 12058 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | 12059 MSR_IA32_MISC_ENABLE_BTS_UNAVAIL; 12060 12061 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP); 12062 __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true); 12063 } 12064 12065 /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */ 12066 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); 12067 kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP); 12068 12069 /* 12070 * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon) 12071 * if no CPUID match is found. Note, it's impossible to get a match at 12072 * RESET since KVM emulates RESET before exposing the vCPU to userspace, 12073 * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry 12074 * on RESET. 
But, go through the motions in case that's ever remedied. 12075 */ 12076 cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1); 12077 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); 12078 12079 static_call(kvm_x86_vcpu_reset)(vcpu, init_event); 12080 12081 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 12082 kvm_rip_write(vcpu, 0xfff0); 12083 12084 vcpu->arch.cr3 = 0; 12085 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 12086 12087 /* 12088 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions 12089 * of Intel's SDM list CD/NW as being set on INIT, but they contradict 12090 * (or qualify) that with a footnote stating that CD/NW are preserved. 12091 */ 12092 new_cr0 = X86_CR0_ET; 12093 if (init_event) 12094 new_cr0 |= (old_cr0 & (X86_CR0_NW | X86_CR0_CD)); 12095 else 12096 new_cr0 |= X86_CR0_NW | X86_CR0_CD; 12097 12098 static_call(kvm_x86_set_cr0)(vcpu, new_cr0); 12099 static_call(kvm_x86_set_cr4)(vcpu, 0); 12100 static_call(kvm_x86_set_efer)(vcpu, 0); 12101 static_call(kvm_x86_update_exception_bitmap)(vcpu); 12102 12103 /* 12104 * On the standard CR0/CR4/EFER modification paths, there are several 12105 * complex conditions determining whether the MMU has to be reset and/or 12106 * which PCIDs have to be flushed. However, CR0.WP and the paging-related 12107 * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush 12108 * is needed anyway if CR0.PG was '1' (which can only happen for INIT, as 12109 * CR0 will be '0' prior to RESET). So we only need to check CR0.PG here. 12110 */ 12111 if (old_cr0 & X86_CR0_PG) { 12112 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 12113 kvm_mmu_reset_context(vcpu); 12114 } 12115 12116 /* 12117 * Intel's SDM states that all TLB entries are flushed on INIT. AMD's 12118 * APM states the TLBs are untouched by INIT, but it also states that 12119 * the TLBs are flushed on "External initialization of the processor." 12120 * Flush the guest TLB regardless of vendor, there is no meaningful 12121 * benefit in relying on the guest to flush the TLB immediately after 12122 * INIT. A spurious TLB flush is benign and likely negligible from a 12123 * performance perspective. 
12124 */ 12125 if (init_event) 12126 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 12127 } 12128 EXPORT_SYMBOL_GPL(kvm_vcpu_reset); 12129 12130 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 12131 { 12132 struct kvm_segment cs; 12133 12134 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 12135 cs.selector = vector << 8; 12136 cs.base = vector << 12; 12137 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 12138 kvm_rip_write(vcpu, 0); 12139 } 12140 EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector); 12141 12142 int kvm_arch_hardware_enable(void) 12143 { 12144 struct kvm *kvm; 12145 struct kvm_vcpu *vcpu; 12146 unsigned long i; 12147 int ret; 12148 u64 local_tsc; 12149 u64 max_tsc = 0; 12150 bool stable, backwards_tsc = false; 12151 12152 kvm_user_return_msr_cpu_online(); 12153 ret = static_call(kvm_x86_hardware_enable)(); 12154 if (ret != 0) 12155 return ret; 12156 12157 local_tsc = rdtsc(); 12158 stable = !kvm_check_tsc_unstable(); 12159 list_for_each_entry(kvm, &vm_list, vm_list) { 12160 kvm_for_each_vcpu(i, vcpu, kvm) { 12161 if (!stable && vcpu->cpu == smp_processor_id()) 12162 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 12163 if (stable && vcpu->arch.last_host_tsc > local_tsc) { 12164 backwards_tsc = true; 12165 if (vcpu->arch.last_host_tsc > max_tsc) 12166 max_tsc = vcpu->arch.last_host_tsc; 12167 } 12168 } 12169 } 12170 12171 /* 12172 * Sometimes, even reliable TSCs go backwards. This happens on 12173 * platforms that reset TSC during suspend or hibernate actions, but 12174 * maintain synchronization. We must compensate. Fortunately, we can 12175 * detect that condition here, which happens early in CPU bringup, 12176 * before any KVM threads can be running. Unfortunately, we can't 12177 * bring the TSCs fully up to date with real time, as we aren't yet far 12178 * enough into CPU bringup that we know how much real time has actually 12179 * elapsed; our helper function, ktime_get_boottime_ns(), will be using boot 12180 * variables that haven't been updated yet. 12181 * 12182 * So we simply find the maximum observed TSC above, then record the 12183 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, 12184 * the adjustment will be applied. Note that we accumulate 12185 * adjustments, in case multiple suspend cycles happen before some VCPU 12186 * gets a chance to run again. In the event that no KVM threads get a 12187 * chance to run, we will miss the entire elapsed period, as we'll have 12188 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may 12189 * lose cycle time. This isn't too big a deal, since the loss will be 12190 * uniform across all VCPUs (not to mention the scenario is extremely 12191 * unlikely). It is possible that a second hibernate recovery happens 12192 * much faster than a first, causing the observed TSC here to be 12193 * smaller; this would require additional padding adjustment, which is 12194 * why we set last_host_tsc to the local tsc observed here. 12195 * 12196 * N.B. - this code below runs only on platforms with reliable TSC, 12197 * as that is the only way backwards_tsc is set above. Also note 12198 * that this runs for ALL vcpus, which is not a bug; all VCPUs should 12199 * have the same delta_cyc adjustment applied if backwards_tsc 12200 * is detected. Note further, this adjustment is only done once, 12201 * as we reset last_host_tsc on all VCPUs to stop this from being 12202 * called multiple times (one for each physical CPU bringup).
12203 * 12204 * Platforms with unreliable TSCs don't have to deal with this; they 12205 * will be compensated by the logic in vcpu_load, which sets the TSC to 12206 * catchup mode. This will catch up all VCPUs to real time, but cannot 12207 * guarantee that they stay in perfect synchronization. 12208 */ 12209 if (backwards_tsc) { 12210 u64 delta_cyc = max_tsc - local_tsc; 12211 list_for_each_entry(kvm, &vm_list, vm_list) { 12212 kvm->arch.backwards_tsc_observed = true; 12213 kvm_for_each_vcpu(i, vcpu, kvm) { 12214 vcpu->arch.tsc_offset_adjustment += delta_cyc; 12215 vcpu->arch.last_host_tsc = local_tsc; 12216 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 12217 } 12218 12219 /* 12220 * We have to disable TSC offset matching: if a VM was being 12221 * booted while the host was entering S4 suspend, the saved 12222 * TSC state may be inconsistent. Solving this issue is 12223 * left as an exercise for the reader. 12224 */ 12225 kvm->arch.last_tsc_nsec = 0; 12226 kvm->arch.last_tsc_write = 0; 12227 } 12228 12229 } 12230 return 0; 12231 } 12232 12233 void kvm_arch_hardware_disable(void) 12234 { 12235 static_call(kvm_x86_hardware_disable)(); 12236 drop_user_return_notifiers(); 12237 } 12238 12239 static inline void kvm_ops_update(struct kvm_x86_init_ops *ops) 12240 { 12241 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); 12242 12243 #define __KVM_X86_OP(func) \ 12244 static_call_update(kvm_x86_##func, kvm_x86_ops.func); 12245 #define KVM_X86_OP(func) \ 12246 WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func) 12247 #define KVM_X86_OP_OPTIONAL __KVM_X86_OP 12248 #define KVM_X86_OP_OPTIONAL_RET0(func) \ 12249 static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \ 12250 (void *)__static_call_return0); 12251 #include <asm/kvm-x86-ops.h> 12252 #undef __KVM_X86_OP 12253 12254 kvm_pmu_ops_update(ops->pmu_ops); 12255 } 12256 12257 int kvm_arch_hardware_setup(void *opaque) 12258 { 12259 struct kvm_x86_init_ops *ops = opaque; 12260 int r; 12261 12262 rdmsrl_safe(MSR_EFER, &host_efer); 12263 12264 if (boot_cpu_has(X86_FEATURE_XSAVES)) 12265 rdmsrl(MSR_IA32_XSS, host_xss); 12266 12267 kvm_init_pmu_capability(); 12268 12269 r = ops->hardware_setup(); 12270 if (r != 0) 12271 return r; 12272 12273 kvm_ops_update(ops); 12274 12275 kvm_register_perf_callbacks(ops->handle_intel_pt_intr); 12276 12277 if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) 12278 kvm_caps.supported_xss = 0; 12279 12280 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f) 12281 cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_); 12282 #undef __kvm_cpu_cap_has 12283 12284 if (kvm_caps.has_tsc_control) { 12285 /* 12286 * Make sure the user can only configure tsc_khz values that 12287 * fit into a signed integer. 12288 * A min value is not calculated because it will always 12289 * be 1 on all machines.
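 * I.e. the cap computed below is the host TSC frequency scaled by the
 * maximum supported ratio, clamped to INT_MAX kHz.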
12290 */ 12291 u64 max = min(0x7fffffffULL, 12292 __scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz)); 12293 kvm_caps.max_guest_tsc_khz = max; 12294 } 12295 kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits; 12296 kvm_init_msr_list(); 12297 return 0; 12298 } 12299 12300 void kvm_arch_hardware_unsetup(void) 12301 { 12302 kvm_unregister_perf_callbacks(); 12303 12304 static_call(kvm_x86_hardware_unsetup)(); 12305 } 12306 12307 int kvm_arch_check_processor_compat(void *opaque) 12308 { 12309 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); 12310 struct kvm_x86_init_ops *ops = opaque; 12311 12312 WARN_ON(!irqs_disabled()); 12313 12314 if (__cr4_reserved_bits(cpu_has, c) != 12315 __cr4_reserved_bits(cpu_has, &boot_cpu_data)) 12316 return -EIO; 12317 12318 return ops->check_processor_compatibility(); 12319 } 12320 12321 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) 12322 { 12323 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; 12324 } 12325 EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp); 12326 12327 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) 12328 { 12329 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; 12330 } 12331 12332 __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); 12333 EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); 12334 12335 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) 12336 { 12337 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 12338 12339 vcpu->arch.l1tf_flush_l1d = true; 12340 if (pmu->version && unlikely(pmu->event_count)) { 12341 pmu->need_cleanup = true; 12342 kvm_make_request(KVM_REQ_PMU, vcpu); 12343 } 12344 static_call(kvm_x86_sched_in)(vcpu, cpu); 12345 } 12346 12347 void kvm_arch_free_vm(struct kvm *kvm) 12348 { 12349 kfree(to_kvm_hv(kvm)->hv_pa_pg); 12350 __kvm_arch_free_vm(kvm); 12351 } 12352 12353 12354 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 12355 { 12356 int ret; 12357 unsigned long flags; 12358 12359 if (type) 12360 return -EINVAL; 12361 12362 ret = kvm_page_track_init(kvm); 12363 if (ret) 12364 goto out; 12365 12366 ret = kvm_mmu_init_vm(kvm); 12367 if (ret) 12368 goto out_page_track; 12369 12370 ret = static_call(kvm_x86_vm_init)(kvm); 12371 if (ret) 12372 goto out_uninit_mmu; 12373 12374 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); 12375 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 12376 atomic_set(&kvm->arch.noncoherent_dma_count, 0); 12377 12378 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 12379 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); 12380 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ 12381 set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, 12382 &kvm->arch.irq_sources_bitmap); 12383 12384 raw_spin_lock_init(&kvm->arch.tsc_write_lock); 12385 mutex_init(&kvm->arch.apic_map_lock); 12386 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock); 12387 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); 12388 12389 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 12390 pvclock_update_vm_gtod_copy(kvm); 12391 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 12392 12393 kvm->arch.default_tsc_khz = max_tsc_khz ? 
: tsc_khz; 12394 kvm->arch.guest_can_read_msr_platform_info = true; 12395 kvm->arch.enable_pmu = enable_pmu; 12396 12397 #if IS_ENABLED(CONFIG_HYPERV) 12398 spin_lock_init(&kvm->arch.hv_root_tdp_lock); 12399 kvm->arch.hv_root_tdp = INVALID_PAGE; 12400 #endif 12401 12402 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); 12403 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); 12404 12405 kvm_apicv_init(kvm); 12406 kvm_hv_init_vm(kvm); 12407 kvm_xen_init_vm(kvm); 12408 12409 return 0; 12410 12411 out_uninit_mmu: 12412 kvm_mmu_uninit_vm(kvm); 12413 out_page_track: 12414 kvm_page_track_cleanup(kvm); 12415 out: 12416 return ret; 12417 } 12418 12419 int kvm_arch_post_init_vm(struct kvm *kvm) 12420 { 12421 return kvm_mmu_post_init_vm(kvm); 12422 } 12423 12424 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) 12425 { 12426 vcpu_load(vcpu); 12427 kvm_mmu_unload(vcpu); 12428 vcpu_put(vcpu); 12429 } 12430 12431 static void kvm_unload_vcpu_mmus(struct kvm *kvm) 12432 { 12433 unsigned long i; 12434 struct kvm_vcpu *vcpu; 12435 12436 kvm_for_each_vcpu(i, vcpu, kvm) { 12437 kvm_clear_async_pf_completion_queue(vcpu); 12438 kvm_unload_vcpu_mmu(vcpu); 12439 } 12440 } 12441 12442 void kvm_arch_sync_events(struct kvm *kvm) 12443 { 12444 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); 12445 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); 12446 kvm_free_pit(kvm); 12447 } 12448 12449 /** 12450 * __x86_set_memory_region: Setup KVM internal memory slot 12451 * 12452 * @kvm: the kvm pointer to the VM. 12453 * @id: the slot ID to setup. 12454 * @gpa: the GPA to install the slot (unused when @size == 0). 12455 * @size: the size of the slot. Set to zero to uninstall a slot. 12456 * 12457 * This function helps to setup a KVM internal memory slot. Specify 12458 * @size > 0 to install a new slot, while @size == 0 to uninstall a 12459 * slot. The return code can be one of the following: 12460 * 12461 * HVA: on success (uninstall will return a bogus HVA) 12462 * -errno: on error 12463 * 12464 * The caller should always use IS_ERR() to check the return value 12465 * before use. Note, the KVM internal memory slots are guaranteed to 12466 * remain valid and unchanged until the VM is destroyed, i.e., the 12467 * GPA->HVA translation will not change. However, the HVA is a user 12468 * address, i.e. its accessibility is not guaranteed, and must be 12469 * accessed via __copy_{to,from}_user(). 12470 */ 12471 void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, 12472 u32 size) 12473 { 12474 int i, r; 12475 unsigned long hva, old_npages; 12476 struct kvm_memslots *slots = kvm_memslots(kvm); 12477 struct kvm_memory_slot *slot; 12478 12479 /* Called with kvm->slots_lock held. */ 12480 if (WARN_ON(id >= KVM_MEM_SLOTS_NUM)) 12481 return ERR_PTR_USR(-EINVAL); 12482 12483 slot = id_to_memslot(slots, id); 12484 if (size) { 12485 if (slot && slot->npages) 12486 return ERR_PTR_USR(-EEXIST); 12487 12488 /* 12489 * MAP_SHARED to prevent internal slot pages from being moved 12490 * by fork()/COW. 
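 * (With a private mapping, fork() would mark the pages COW and a
 * subsequent write could relocate the backing page under KVM.)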
12491 */ 12492 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, 12493 MAP_SHARED | MAP_ANONYMOUS, 0); 12494 if (IS_ERR((void *)hva)) 12495 return (void __user *)hva; 12496 } else { 12497 if (!slot || !slot->npages) 12498 return NULL; 12499 12500 old_npages = slot->npages; 12501 hva = slot->userspace_addr; 12502 } 12503 12504 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 12505 struct kvm_userspace_memory_region m; 12506 12507 m.slot = id | (i << 16); 12508 m.flags = 0; 12509 m.guest_phys_addr = gpa; 12510 m.userspace_addr = hva; 12511 m.memory_size = size; 12512 r = __kvm_set_memory_region(kvm, &m); 12513 if (r < 0) 12514 return ERR_PTR_USR(r); 12515 } 12516 12517 if (!size) 12518 vm_munmap(hva, old_npages * PAGE_SIZE); 12519 12520 return (void __user *)hva; 12521 } 12522 EXPORT_SYMBOL_GPL(__x86_set_memory_region); 12523 12524 void kvm_arch_pre_destroy_vm(struct kvm *kvm) 12525 { 12526 kvm_mmu_pre_destroy_vm(kvm); 12527 } 12528 12529 void kvm_arch_destroy_vm(struct kvm *kvm) 12530 { 12531 if (current->mm == kvm->mm) { 12532 /* 12533 * Free memory regions allocated on behalf of userspace, 12534 * unless the memory map has changed due to process exit 12535 * or fd copying. 12536 */ 12537 mutex_lock(&kvm->slots_lock); 12538 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 12539 0, 0); 12540 __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 12541 0, 0); 12542 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); 12543 mutex_unlock(&kvm->slots_lock); 12544 } 12545 kvm_unload_vcpu_mmus(kvm); 12546 static_call_cond(kvm_x86_vm_destroy)(kvm); 12547 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); 12548 kvm_pic_destroy(kvm); 12549 kvm_ioapic_destroy(kvm); 12550 kvm_destroy_vcpus(kvm); 12551 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 12552 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); 12553 kvm_mmu_uninit_vm(kvm); 12554 kvm_page_track_cleanup(kvm); 12555 kvm_xen_destroy_vm(kvm); 12556 kvm_hv_destroy_vm(kvm); 12557 } 12558 12559 static void memslot_rmap_free(struct kvm_memory_slot *slot) 12560 { 12561 int i; 12562 12563 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 12564 kvfree(slot->arch.rmap[i]); 12565 slot->arch.rmap[i] = NULL; 12566 } 12567 } 12568 12569 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 12570 { 12571 int i; 12572 12573 memslot_rmap_free(slot); 12574 12575 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12576 kvfree(slot->arch.lpage_info[i - 1]); 12577 slot->arch.lpage_info[i - 1] = NULL; 12578 } 12579 12580 kvm_page_track_free_memslot(slot); 12581 } 12582 12583 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages) 12584 { 12585 const int sz = sizeof(*slot->arch.rmap[0]); 12586 int i; 12587 12588 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 12589 int level = i + 1; 12590 int lpages = __kvm_mmu_slot_lpages(slot, npages, level); 12591 12592 if (slot->arch.rmap[i]) 12593 continue; 12594 12595 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT); 12596 if (!slot->arch.rmap[i]) { 12597 memslot_rmap_free(slot); 12598 return -ENOMEM; 12599 } 12600 } 12601 12602 return 0; 12603 } 12604 12605 static int kvm_alloc_memslot_metadata(struct kvm *kvm, 12606 struct kvm_memory_slot *slot) 12607 { 12608 unsigned long npages = slot->npages; 12609 int i, r; 12610 12611 /* 12612 * Clear out the previous array pointers for the KVM_MR_MOVE case. 
The 12613 * old arrays will be freed by __kvm_set_memory_region() if installing 12614 * the new memslot is successful. 12615 */ 12616 memset(&slot->arch, 0, sizeof(slot->arch)); 12617 12618 if (kvm_memslots_have_rmaps(kvm)) { 12619 r = memslot_rmap_alloc(slot, npages); 12620 if (r) 12621 return r; 12622 } 12623 12624 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12625 struct kvm_lpage_info *linfo; 12626 unsigned long ugfn; 12627 int lpages; 12628 int level = i + 1; 12629 12630 lpages = __kvm_mmu_slot_lpages(slot, npages, level); 12631 12632 linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT); 12633 if (!linfo) 12634 goto out_free; 12635 12636 slot->arch.lpage_info[i - 1] = linfo; 12637 12638 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 12639 linfo[0].disallow_lpage = 1; 12640 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 12641 linfo[lpages - 1].disallow_lpage = 1; 12642 ugfn = slot->userspace_addr >> PAGE_SHIFT; 12643 /* 12644 * If the gfn and userspace address are not aligned wrt each 12645 * other, disable large page support for this slot. 12646 */ 12647 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { 12648 unsigned long j; 12649 12650 for (j = 0; j < lpages; ++j) 12651 linfo[j].disallow_lpage = 1; 12652 } 12653 } 12654 12655 if (kvm_page_track_create_memslot(kvm, slot, npages)) 12656 goto out_free; 12657 12658 return 0; 12659 12660 out_free: 12661 memslot_rmap_free(slot); 12662 12663 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12664 kvfree(slot->arch.lpage_info[i - 1]); 12665 slot->arch.lpage_info[i - 1] = NULL; 12666 } 12667 return -ENOMEM; 12668 } 12669 12670 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) 12671 { 12672 struct kvm_vcpu *vcpu; 12673 unsigned long i; 12674 12675 /* 12676 * memslots->generation has been incremented. 12677 * mmio generation may have reached its maximum value. 12678 */ 12679 kvm_mmu_invalidate_mmio_sptes(kvm, gen); 12680 12681 /* Force re-initialization of steal_time cache */ 12682 kvm_for_each_vcpu(i, vcpu, kvm) 12683 kvm_vcpu_kick(vcpu); 12684 } 12685 12686 int kvm_arch_prepare_memory_region(struct kvm *kvm, 12687 const struct kvm_memory_slot *old, 12688 struct kvm_memory_slot *new, 12689 enum kvm_mr_change change) 12690 { 12691 if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) { 12692 if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn()) 12693 return -EINVAL; 12694 12695 return kvm_alloc_memslot_metadata(kvm, new); 12696 } 12697 12698 if (change == KVM_MR_FLAGS_ONLY) 12699 memcpy(&new->arch, &old->arch, sizeof(old->arch)); 12700 else if (WARN_ON_ONCE(change != KVM_MR_DELETE)) 12701 return -EIO; 12702 12703 return 0; 12704 } 12705 12706 12707 static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable) 12708 { 12709 struct kvm_arch *ka = &kvm->arch; 12710 12711 if (!kvm_x86_ops.cpu_dirty_log_size) 12712 return; 12713 12714 if ((enable && ++ka->cpu_dirty_logging_count == 1) || 12715 (!enable && --ka->cpu_dirty_logging_count == 0)) 12716 kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING); 12717 12718 WARN_ON_ONCE(ka->cpu_dirty_logging_count < 0); 12719 } 12720 12721 static void kvm_mmu_slot_apply_flags(struct kvm *kvm, 12722 struct kvm_memory_slot *old, 12723 const struct kvm_memory_slot *new, 12724 enum kvm_mr_change change) 12725 { 12726 u32 old_flags = old ? old->flags : 0; 12727 u32 new_flags = new ? 
new->flags : 0; 12728 bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES; 12729 12730 /* 12731 * Update CPU dirty logging if dirty logging is being toggled. This 12732 * applies to all operations. 12733 */ 12734 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) 12735 kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages); 12736 12737 /* 12738 * Nothing more to do for RO slots (which can't be dirtied and can't be 12739 * made writable) or CREATE/MOVE/DELETE of a slot. 12740 * 12741 * For a memslot with dirty logging disabled: 12742 * CREATE: No dirty mappings will already exist. 12743 * MOVE/DELETE: The old mappings will already have been cleaned up by 12744 * kvm_arch_flush_shadow_memslot() 12745 * 12746 * For a memslot with dirty logging enabled: 12747 * CREATE: No shadow pages exist, thus nothing to write-protect 12748 * and no dirty bits to clear. 12749 * MOVE/DELETE: The old mappings will already have been cleaned up by 12750 * kvm_arch_flush_shadow_memslot(). 12751 */ 12752 if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY)) 12753 return; 12754 12755 /* 12756 * READONLY and non-flags changes were filtered out above, and the only 12757 * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty 12758 * logging isn't being toggled on or off. 12759 */ 12760 if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES))) 12761 return; 12762 12763 if (!log_dirty_pages) { 12764 /* 12765 * Dirty logging tracks sptes in 4k granularity, meaning that 12766 * large sptes have to be split. If live migration succeeds, 12767 * the guest in the source machine will be destroyed and large 12768 * sptes will be created in the destination. However, if the 12769 * guest continues to run in the source machine (for example if 12770 * live migration fails), small sptes will remain around and 12771 * cause bad performance. 12772 * 12773 * Scan sptes if dirty logging has been stopped, dropping those 12774 * which can be collapsed into a single large-page spte. Later 12775 * page faults will create the large-page sptes. 12776 */ 12777 kvm_mmu_zap_collapsible_sptes(kvm, new); 12778 } else { 12779 /* 12780 * Initially-all-set does not require write protecting any page, 12781 * because they're all assumed to be dirty. 12782 */ 12783 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 12784 return; 12785 12786 if (READ_ONCE(eager_page_split)) 12787 kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K); 12788 12789 if (kvm_x86_ops.cpu_dirty_log_size) { 12790 kvm_mmu_slot_leaf_clear_dirty(kvm, new); 12791 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M); 12792 } else { 12793 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K); 12794 } 12795 12796 /* 12797 * Unconditionally flush the TLBs after enabling dirty logging. 12798 * A flush is almost always going to be necessary (see below), 12799 * and unconditionally flushing allows the helpers to omit 12800 * the subtly complex checks when removing write access. 12801 * 12802 * Do the flush outside of mmu_lock to reduce the amount of 12803 * time mmu_lock is held. Flushing after dropping mmu_lock is 12804 * safe as KVM only needs to guarantee the slot is fully 12805 * write-protected before returning to userspace, i.e. before 12806 * userspace can consume the dirty status. 12807 * 12808 * Flushing outside of mmu_lock requires KVM to be careful when 12809 * making decisions based on writable status of an SPTE, e.g. a 12810 * !writable SPTE doesn't guarantee a CPU can't perform writes. 
12811 * 12812 * Specifically, KVM also write-protects guest page tables to 12813 * monitor changes when using shadow paging, and must guarantee 12814 * no CPUs can write to those pages before mmu_lock is dropped. 12815 * Because CPUs may have stale TLB entries at this point, a 12816 * !writable SPTE doesn't guarantee CPUs can't perform writes. 12817 * 12818 * KVM also allows making SPTEs writable outside of mmu_lock, 12819 * e.g. to allow dirty logging without taking mmu_lock. 12820 * 12821 * To handle these scenarios, KVM uses a separate software-only 12822 * bit (MMU-writable) to track if a SPTE is !writable due to 12823 * a guest page table being write-protected (KVM clears the 12824 * MMU-writable flag when write-protecting for shadow paging). 12825 * 12826 * The use of MMU-writable is also the primary motivation for 12827 * the unconditional flush. Because KVM must guarantee that a 12828 * CPU doesn't contain stale, writable TLB entries for a 12829 * !MMU-writable SPTE, KVM must flush if it encounters any 12830 * MMU-writable SPTE regardless of whether the actual hardware 12831 * writable bit was set. I.e. KVM is almost guaranteed to need 12832 * to flush, while unconditionally flushing allows the "remove 12833 * write access" helpers to ignore MMU-writable entirely. 12834 * 12835 * See is_writable_pte() for more details (the case involving 12836 * access-tracked SPTEs is particularly relevant). 12837 */ 12838 kvm_arch_flush_remote_tlbs_memslot(kvm, new); 12839 } 12840 } 12841 12842 void kvm_arch_commit_memory_region(struct kvm *kvm, 12843 struct kvm_memory_slot *old, 12844 const struct kvm_memory_slot *new, 12845 enum kvm_mr_change change) 12846 { 12847 if (!kvm->arch.n_requested_mmu_pages && 12848 (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) { 12849 unsigned long nr_mmu_pages; 12850 12851 nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO; 12852 nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); 12853 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); 12854 } 12855 12856 kvm_mmu_slot_apply_flags(kvm, old, new, change); 12857 12858 /* Free the arrays associated with the old memslot.
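 * This is only needed for KVM_MR_MOVE; a deleted slot's metadata is
 * freed by common KVM code.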
*/ 12859 if (change == KVM_MR_MOVE) 12860 kvm_arch_free_memslot(kvm, old); 12861 } 12862 12863 void kvm_arch_flush_shadow_all(struct kvm *kvm) 12864 { 12865 kvm_mmu_zap_all(kvm); 12866 } 12867 12868 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 12869 struct kvm_memory_slot *slot) 12870 { 12871 kvm_page_track_flush_slot(kvm, slot); 12872 } 12873 12874 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) 12875 { 12876 return (is_guest_mode(vcpu) && 12877 static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); 12878 } 12879 12880 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) 12881 { 12882 if (!list_empty_careful(&vcpu->async_pf.done)) 12883 return true; 12884 12885 if (kvm_apic_has_pending_init_or_sipi(vcpu) && 12886 kvm_apic_init_sipi_allowed(vcpu)) 12887 return true; 12888 12889 if (vcpu->arch.pv.pv_unhalted) 12890 return true; 12891 12892 if (kvm_is_exception_pending(vcpu)) 12893 return true; 12894 12895 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 12896 (vcpu->arch.nmi_pending && 12897 static_call(kvm_x86_nmi_allowed)(vcpu, false))) 12898 return true; 12899 12900 if (kvm_test_request(KVM_REQ_SMI, vcpu) || 12901 (vcpu->arch.smi_pending && 12902 static_call(kvm_x86_smi_allowed)(vcpu, false))) 12903 return true; 12904 12905 if (kvm_arch_interrupt_allowed(vcpu) && 12906 (kvm_cpu_has_interrupt(vcpu) || 12907 kvm_guest_apic_has_interrupt(vcpu))) 12908 return true; 12909 12910 if (kvm_hv_has_stimer_pending(vcpu)) 12911 return true; 12912 12913 if (is_guest_mode(vcpu) && 12914 kvm_x86_ops.nested_ops->has_events && 12915 kvm_x86_ops.nested_ops->has_events(vcpu)) 12916 return true; 12917 12918 if (kvm_xen_has_pending_events(vcpu)) 12919 return true; 12920 12921 return false; 12922 } 12923 12924 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 12925 { 12926 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); 12927 } 12928 12929 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 12930 { 12931 if (kvm_vcpu_apicv_active(vcpu) && 12932 static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) 12933 return true; 12934 12935 return false; 12936 } 12937 12938 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 12939 { 12940 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) 12941 return true; 12942 12943 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 12944 kvm_test_request(KVM_REQ_SMI, vcpu) || 12945 kvm_test_request(KVM_REQ_EVENT, vcpu)) 12946 return true; 12947 12948 return kvm_arch_dy_has_pending_interrupt(vcpu); 12949 } 12950 12951 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) 12952 { 12953 if (vcpu->arch.guest_state_protected) 12954 return true; 12955 12956 return vcpu->arch.preempted_in_kernel; 12957 } 12958 12959 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) 12960 { 12961 return kvm_rip_read(vcpu); 12962 } 12963 12964 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 12965 { 12966 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; 12967 } 12968 12969 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) 12970 { 12971 return static_call(kvm_x86_interrupt_allowed)(vcpu, false); 12972 } 12973 12974 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) 12975 { 12976 /* Can't read the RIP when guest state is protected, just return 0 */ 12977 if (vcpu->arch.guest_state_protected) 12978 return 0; 12979 12980 if (is_64_bit_mode(vcpu)) 12981 return kvm_rip_read(vcpu); 12982 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + 12983 kvm_rip_read(vcpu)); 12984 } 12985 EXPORT_SYMBOL_GPL(kvm_get_linear_rip); 12986 12987 bool 
kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) 12988 { 12989 return kvm_get_linear_rip(vcpu) == linear_rip; 12990 } 12991 EXPORT_SYMBOL_GPL(kvm_is_linear_rip); 12992 12993 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) 12994 { 12995 unsigned long rflags; 12996 12997 rflags = static_call(kvm_x86_get_rflags)(vcpu); 12998 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 12999 rflags &= ~X86_EFLAGS_TF; 13000 return rflags; 13001 } 13002 EXPORT_SYMBOL_GPL(kvm_get_rflags); 13003 13004 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 13005 { 13006 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && 13007 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) 13008 rflags |= X86_EFLAGS_TF; 13009 static_call(kvm_x86_set_rflags)(vcpu, rflags); 13010 } 13011 13012 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 13013 { 13014 __kvm_set_rflags(vcpu, rflags); 13015 kvm_make_request(KVM_REQ_EVENT, vcpu); 13016 } 13017 EXPORT_SYMBOL_GPL(kvm_set_rflags); 13018 13019 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) 13020 { 13021 BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU)); 13022 13023 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); 13024 } 13025 13026 static inline u32 kvm_async_pf_next_probe(u32 key) 13027 { 13028 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); 13029 } 13030 13031 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 13032 { 13033 u32 key = kvm_async_pf_hash_fn(gfn); 13034 13035 while (vcpu->arch.apf.gfns[key] != ~0) 13036 key = kvm_async_pf_next_probe(key); 13037 13038 vcpu->arch.apf.gfns[key] = gfn; 13039 } 13040 13041 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) 13042 { 13043 int i; 13044 u32 key = kvm_async_pf_hash_fn(gfn); 13045 13046 for (i = 0; i < ASYNC_PF_PER_VCPU && 13047 (vcpu->arch.apf.gfns[key] != gfn && 13048 vcpu->arch.apf.gfns[key] != ~0); i++) 13049 key = kvm_async_pf_next_probe(key); 13050 13051 return key; 13052 } 13053 13054 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 13055 { 13056 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; 13057 } 13058 13059 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 13060 { 13061 u32 i, j, k; 13062 13063 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); 13064 13065 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) 13066 return; 13067 13068 while (true) { 13069 vcpu->arch.apf.gfns[i] = ~0; 13070 do { 13071 j = kvm_async_pf_next_probe(j); 13072 if (vcpu->arch.apf.gfns[j] == ~0) 13073 return; 13074 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); 13075 /* 13076 * k lies cyclically in ]i,j] 13077 * | i.k.j | 13078 * |....j i.k.| or |.k..j i...| 13079 */ 13080 } while ((i <= j) ? 
(i < k && k <= j) : (i < k || k <= j)); 13081 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; 13082 i = j; 13083 } 13084 } 13085 13086 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu) 13087 { 13088 u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT; 13089 13090 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, 13091 sizeof(reason)); 13092 } 13093 13094 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token) 13095 { 13096 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token); 13097 13098 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, 13099 &token, offset, sizeof(token)); 13100 } 13101 13102 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu) 13103 { 13104 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token); 13105 u32 val; 13106 13107 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, 13108 &val, offset, sizeof(val))) 13109 return false; 13110 13111 return !val; 13112 } 13113 13114 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) 13115 { 13116 13117 if (!kvm_pv_async_pf_enabled(vcpu)) 13118 return false; 13119 13120 if (vcpu->arch.apf.send_user_only && 13121 static_call(kvm_x86_get_cpl)(vcpu) == 0) 13122 return false; 13123 13124 if (is_guest_mode(vcpu)) { 13125 /* 13126 * L1 needs to opt into the special #PF vmexits that are 13127 * used to deliver async page faults. 13128 */ 13129 return vcpu->arch.apf.delivery_as_pf_vmexit; 13130 } else { 13131 /* 13132 * Play it safe in case the guest temporarily disables paging. 13133 * The real mode IDT in particular is unlikely to have a #PF 13134 * exception setup. 13135 */ 13136 return is_paging(vcpu); 13137 } 13138 } 13139 13140 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) 13141 { 13142 if (unlikely(!lapic_in_kernel(vcpu) || 13143 kvm_event_needs_reinjection(vcpu) || 13144 kvm_is_exception_pending(vcpu))) 13145 return false; 13146 13147 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) 13148 return false; 13149 13150 /* 13151 * If interrupts are off we cannot even use an artificial 13152 * halt state. 13153 */ 13154 return kvm_arch_interrupt_allowed(vcpu); 13155 } 13156 13157 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 13158 struct kvm_async_pf *work) 13159 { 13160 struct x86_exception fault; 13161 13162 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); 13163 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); 13164 13165 if (kvm_can_deliver_async_pf(vcpu) && 13166 !apf_put_user_notpresent(vcpu)) { 13167 fault.vector = PF_VECTOR; 13168 fault.error_code_valid = true; 13169 fault.error_code = 0; 13170 fault.nested_page_fault = false; 13171 fault.address = work->arch.token; 13172 fault.async_page_fault = true; 13173 kvm_inject_page_fault(vcpu, &fault); 13174 return true; 13175 } else { 13176 /* 13177 * It is not possible to deliver a paravirtualized asynchronous 13178 * page fault, but putting the guest in an artificial halt state 13179 * can be beneficial nevertheless: if an interrupt arrives, we 13180 * can deliver it timely and perhaps the guest will schedule 13181 * another process. When the instruction that triggered a page 13182 * fault is retried, hopefully the page will be ready in the host. 
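 * (KVM_REQ_APF_HALT below puts the vCPU into that artificial halt
 * state; interrupts can still wake it, and the faulting instruction
 * is retried once the guest runs again.)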
13183 */ 13184 kvm_make_request(KVM_REQ_APF_HALT, vcpu); 13185 return false; 13186 } 13187 } 13188 13189 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 13190 struct kvm_async_pf *work) 13191 { 13192 struct kvm_lapic_irq irq = { 13193 .delivery_mode = APIC_DM_FIXED, 13194 .vector = vcpu->arch.apf.vec 13195 }; 13196 13197 if (work->wakeup_all) 13198 work->arch.token = ~0; /* broadcast wakeup */ 13199 else 13200 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); 13201 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); 13202 13203 if ((work->wakeup_all || work->notpresent_injected) && 13204 kvm_pv_async_pf_enabled(vcpu) && 13205 !apf_put_user_ready(vcpu, work->arch.token)) { 13206 vcpu->arch.apf.pageready_pending = true; 13207 kvm_apic_set_irq(vcpu, &irq, NULL); 13208 } 13209 13210 vcpu->arch.apf.halted = false; 13211 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 13212 } 13213 13214 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) 13215 { 13216 kvm_make_request(KVM_REQ_APF_READY, vcpu); 13217 if (!vcpu->arch.apf.pageready_pending) 13218 kvm_vcpu_kick(vcpu); 13219 } 13220 13221 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) 13222 { 13223 if (!kvm_pv_async_pf_enabled(vcpu)) 13224 return true; 13225 else 13226 return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); 13227 } 13228 13229 void kvm_arch_start_assignment(struct kvm *kvm) 13230 { 13231 if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1) 13232 static_call_cond(kvm_x86_pi_start_assignment)(kvm); 13233 } 13234 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment); 13235 13236 void kvm_arch_end_assignment(struct kvm *kvm) 13237 { 13238 atomic_dec(&kvm->arch.assigned_device_count); 13239 } 13240 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment); 13241 13242 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm) 13243 { 13244 return arch_atomic_read(&kvm->arch.assigned_device_count); 13245 } 13246 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); 13247 13248 void kvm_arch_register_noncoherent_dma(struct kvm *kvm) 13249 { 13250 atomic_inc(&kvm->arch.noncoherent_dma_count); 13251 } 13252 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma); 13253 13254 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) 13255 { 13256 atomic_dec(&kvm->arch.noncoherent_dma_count); 13257 } 13258 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma); 13259 13260 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) 13261 { 13262 return atomic_read(&kvm->arch.noncoherent_dma_count); 13263 } 13264 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma); 13265 13266 bool kvm_arch_has_irq_bypass(void) 13267 { 13268 return true; 13269 } 13270 13271 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, 13272 struct irq_bypass_producer *prod) 13273 { 13274 struct kvm_kernel_irqfd *irqfd = 13275 container_of(cons, struct kvm_kernel_irqfd, consumer); 13276 int ret; 13277 13278 irqfd->producer = prod; 13279 kvm_arch_start_assignment(irqfd->kvm); 13280 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, 13281 prod->irq, irqfd->gsi, 1); 13282 13283 if (ret) 13284 kvm_arch_end_assignment(irqfd->kvm); 13285 13286 return ret; 13287 } 13288 13289 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, 13290 struct irq_bypass_producer *prod) 13291 { 13292 int ret; 13293 struct kvm_kernel_irqfd *irqfd = 13294 container_of(cons, struct kvm_kernel_irqfd, consumer); 13295 13296 WARN_ON(irqfd->producer != prod); 13297 irqfd->producer = NULL; 13298 13299 /* 13300 * When producer of consumer is 
unregistered, we change back to 13301 * remapped mode, so we can re-use the current implementation 13302 * when the irq is masked/disabled or the consumer side (KVM 13303 * in this case) doesn't want to receive the interrupts. 13304 */ 13305 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); 13306 if (ret) 13307 printk(KERN_INFO "irq bypass consumer (token %p) unregistration" 13308 " fails: %d\n", irqfd->consumer.token, ret); 13309 13310 kvm_arch_end_assignment(irqfd->kvm); 13311 } 13312 13313 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, 13314 uint32_t guest_irq, bool set) 13315 { 13316 return static_call(kvm_x86_pi_update_irte)(kvm, host_irq, guest_irq, set); 13317 } 13318 13319 bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, 13320 struct kvm_kernel_irq_routing_entry *new) 13321 { 13322 if (new->type != KVM_IRQ_ROUTING_MSI) 13323 return true; 13324 13325 return !!memcmp(&old->msi, &new->msi, sizeof(new->msi)); 13326 } 13327 13328 bool kvm_vector_hashing_enabled(void) 13329 { 13330 return vector_hashing; 13331 } 13332 13333 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) 13334 { 13335 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; 13336 } 13337 EXPORT_SYMBOL_GPL(kvm_arch_no_poll); 13338 13339 13340 int kvm_spec_ctrl_test_value(u64 value) 13341 { 13342 /* 13343 * Test that setting IA32_SPEC_CTRL to the given value 13344 * is allowed by the host processor. 13345 */ 13346 13347 u64 saved_value; 13348 unsigned long flags; 13349 int ret = 0; 13350 13351 local_irq_save(flags); 13352 13353 if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value)) 13354 ret = 1; 13355 else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value)) 13356 ret = 1; 13357 else 13358 wrmsrl(MSR_IA32_SPEC_CTRL, saved_value); 13359 13360 local_irq_restore(flags); 13361 13362 return ret; 13363 } 13364 EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value); 13365 13366 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) 13367 { 13368 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 13369 struct x86_exception fault; 13370 u64 access = error_code & 13371 (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK); 13372 13373 if (!(error_code & PFERR_PRESENT_MASK) || 13374 mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) { 13375 /* 13376 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page 13377 * tables probably do not match the TLB. Just proceed 13378 * with the error code that the processor gave. 13379 */ 13380 fault.vector = PF_VECTOR; 13381 fault.error_code_valid = true; 13382 fault.error_code = error_code; 13383 fault.nested_page_fault = false; 13384 fault.address = gva; 13385 fault.async_page_fault = false; 13386 } 13387 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); 13388 } 13389 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error); 13390 13391 /* 13392 * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns 13393 * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value 13394 * indicates whether exit to userspace is needed.
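 *
 * Typical usage (an illustrative sketch mirroring kvm_handle_invpcid()
 * below):
 *
 *	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 *	if (r != X86EMUL_CONTINUE)
 *		return kvm_handle_memory_failure(vcpu, r, &e);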
13395 */ 13396 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, 13397 struct x86_exception *e) 13398 { 13399 if (r == X86EMUL_PROPAGATE_FAULT) { 13400 kvm_inject_emulated_page_fault(vcpu, e); 13401 return 1; 13402 } 13403 13404 /* 13405 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED 13406 * while handling a VMX instruction, KVM could've handled the request 13407 * correctly by exiting to userspace and performing I/O, but there 13408 * doesn't seem to be a real use-case behind such requests; just return 13409 * KVM_EXIT_INTERNAL_ERROR for now. 13410 */ 13411 kvm_prepare_emulation_failure_exit(vcpu); 13412 13413 return 0; 13414 } 13415 EXPORT_SYMBOL_GPL(kvm_handle_memory_failure); 13416 13417 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) 13418 { 13419 bool pcid_enabled; 13420 struct x86_exception e; 13421 struct { 13422 u64 pcid; 13423 u64 gla; 13424 } operand; 13425 int r; 13426 13427 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 13428 if (r != X86EMUL_CONTINUE) 13429 return kvm_handle_memory_failure(vcpu, r, &e); 13430 13431 if (operand.pcid >> 12 != 0) { 13432 kvm_inject_gp(vcpu, 0); 13433 return 1; 13434 } 13435 13436 pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); 13437 13438 switch (type) { 13439 case INVPCID_TYPE_INDIV_ADDR: 13440 if ((!pcid_enabled && (operand.pcid != 0)) || 13441 is_noncanonical_address(operand.gla, vcpu)) { 13442 kvm_inject_gp(vcpu, 0); 13443 return 1; 13444 } 13445 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); 13446 return kvm_skip_emulated_instruction(vcpu); 13447 13448 case INVPCID_TYPE_SINGLE_CTXT: 13449 if (!pcid_enabled && (operand.pcid != 0)) { 13450 kvm_inject_gp(vcpu, 0); 13451 return 1; 13452 } 13453 13454 kvm_invalidate_pcid(vcpu, operand.pcid); 13455 return kvm_skip_emulated_instruction(vcpu); 13456 13457 case INVPCID_TYPE_ALL_NON_GLOBAL: 13458 /* 13459 * Currently, KVM doesn't mark global entries in the shadow 13460 * page tables, so a non-global flush just degenerates to a 13461 * global flush. If needed, we could optimize this later by 13462 * keeping track of global entries in shadow page tables. 13463 */ 13464 13465 fallthrough; 13466 case INVPCID_TYPE_ALL_INCL_GLOBAL: 13467 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 13468 return kvm_skip_emulated_instruction(vcpu); 13469 13470 default: 13471 kvm_inject_gp(vcpu, 0); 13472 return 1; 13473 } 13474 } 13475 EXPORT_SYMBOL_GPL(kvm_handle_invpcid); 13476 13477 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu) 13478 { 13479 struct kvm_run *run = vcpu->run; 13480 struct kvm_mmio_fragment *frag; 13481 unsigned int len; 13482 13483 BUG_ON(!vcpu->mmio_needed); 13484 13485 /* Complete previous fragment */ 13486 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 13487 len = min(8u, frag->len); 13488 if (!vcpu->mmio_is_write) 13489 memcpy(frag->data, run->mmio.data, len); 13490 13491 if (frag->len <= 8) { 13492 /* Switch to the next fragment. */ 13493 frag++; 13494 vcpu->mmio_cur_fragment++; 13495 } else { 13496 /* Go forward to the next mmio piece.
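 * A fragment longer than 8 bytes is consumed 8 bytes per exit:
 * advance the data pointer and GPA, and shrink the remaining length.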
int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
			  void *data)
{
	int handled;
	struct kvm_mmio_fragment *frag;

	if (!data)
		return -EINVAL;

	handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
	if (handled == bytes)
		return 1;

	bytes -= handled;
	gpa += handled;
	data += handled;

	/* TODO: Check if we need to increment the number of frags */
	frag = vcpu->mmio_fragments;
	vcpu->mmio_nr_fragments = 1;
	frag->len = bytes;
	frag->gpa = gpa;
	frag->data = data;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.phys_addr = gpa;
	vcpu->run->mmio.len = min(8u, frag->len);
	vcpu->run->mmio.is_write = 1;
	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
	vcpu->run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write);

int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
			 void *data)
{
	int handled;
	struct kvm_mmio_fragment *frag;

	if (!data)
		return -EINVAL;

	handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
	if (handled == bytes)
		return 1;

	bytes -= handled;
	gpa += handled;
	data += handled;

	/* TODO: Check if we need to increment the number of frags */
	frag = vcpu->mmio_fragments;
	vcpu->mmio_nr_fragments = 1;
	frag->len = bytes;
	frag->gpa = gpa;
	frag->data = data;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.phys_addr = gpa;
	vcpu->run->mmio.len = min(8u, frag->len);
	vcpu->run->mmio.is_write = 0;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);

static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned int count, int size)
{
	vcpu->arch.sev_pio_count -= count;
	vcpu->arch.sev_pio_data += count * size;
}
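/*
 * Note on the SEV-ES string PIO helpers below: vcpu->arch.sev_pio_data
 * and vcpu->arch.sev_pio_count track the unconsumed part of the guest
 * buffer across loop iterations and userspace round trips.  Each pass
 * emulates at most PAGE_SIZE / size items (the capacity of the internal
 * PIO data page).  When emulator_pio_in()/emulator_pio_out() cannot
 * finish in the kernel, the helpers register a complete_userspace_io
 * callback and return 0 so the vCPU exits to userspace; the callback
 * resumes the loop on the next KVM_RUN.
 */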
13622 { 13623 for (;;) { 13624 unsigned int count = 13625 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); 13626 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count); 13627 13628 /* memcpy done already by emulator_pio_out. */ 13629 advance_sev_es_emulated_pio(vcpu, count, size); 13630 if (!ret) 13631 break; 13632 13633 /* Emulation done by the kernel. */ 13634 if (!vcpu->arch.sev_pio_count) 13635 return 1; 13636 } 13637 13638 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs; 13639 return 0; 13640 } 13641 13642 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, 13643 unsigned int port); 13644 13645 static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu) 13646 { 13647 unsigned count = vcpu->arch.pio.count; 13648 int size = vcpu->arch.pio.size; 13649 int port = vcpu->arch.pio.port; 13650 13651 complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data); 13652 advance_sev_es_emulated_pio(vcpu, count, size); 13653 if (vcpu->arch.sev_pio_count) 13654 return kvm_sev_es_ins(vcpu, size, port); 13655 return 1; 13656 } 13657 13658 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, 13659 unsigned int port) 13660 { 13661 for (;;) { 13662 unsigned int count = 13663 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); 13664 if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count)) 13665 break; 13666 13667 /* Emulation done by the kernel. */ 13668 advance_sev_es_emulated_pio(vcpu, count, size); 13669 if (!vcpu->arch.sev_pio_count) 13670 return 1; 13671 } 13672 13673 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins; 13674 return 0; 13675 } 13676 13677 int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size, 13678 unsigned int port, void *data, unsigned int count, 13679 int in) 13680 { 13681 vcpu->arch.sev_pio_data = data; 13682 vcpu->arch.sev_pio_count = count; 13683 return in ? 
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);

static int __init kvm_x86_init(void)
{
	kvm_mmu_x86_module_init();
	return 0;
}
module_init(kvm_x86_init);

static void __exit kvm_x86_exit(void)
{
	/*
	 * If module_init() is implemented, module_exit() must also be
	 * implemented to allow module unload.
	 */
}
module_exit(kvm_x86_exit);