// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"
#include "lapic.h"
#include "xen.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
#include <linux/suspend.h>

#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <asm/pkru.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <asm/sgx.h>
#include <clocksource/hyperv_timer.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32

struct kvm_caps kvm_caps __read_mostly = {
	.supported_mce_cap = MCG_CTL_P | MCG_SER_P,
};
EXPORT_SYMBOL_GPL(kvm_caps);

#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))

#define emul_to_vcpu(ctxt) \
	((struct kvm_vcpu *)(ctxt)->vcpu)

/* EFER defaults:
 * - enable syscall by default because it's emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;

#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)

#define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE

#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
				    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);

struct kvm_x86_ops kvm_x86_ops __read_mostly;

#define KVM_X86_OP(func)					     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_##func,			     \
				*(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>
EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);

static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL_GPL(report_ignored_msrs);

unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

/*
 * lapic timer advance (tscdeadline mode only) in nanoseconds.  '-1' enables
 * adaptive tuning starting from default advancement of 1000ns.  '0' disables
 * advancement entirely.  Any other value is used as-is and disables adaptive
 * tuning, i.e. allows privileged userspace to set an exact advancement time.
 */
static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);

bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, S_IRUGO);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);

static bool __read_mostly force_emulation_prefix = false;
module_param(force_emulation_prefix, bool, S_IRUGO);

int __read_mostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);

/* Enable/disable PMU virtualization */
bool __read_mostly enable_pmu = true;
EXPORT_SYMBOL_GPL(enable_pmu);
module_param(enable_pmu, bool, 0444);

bool __read_mostly eager_page_split = true;
module_param(eager_page_split, bool, 0644);

/*
 * Restoring the host value for MSRs that are only consumed when running in
 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
 * returns to userspace, i.e. the kernel can run with the guest's value.
 */
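/*
 * Illustrative sketch (not part of the original code): the user-return MSR
 * machinery below is typically used by the vendor modules roughly as
 * follows.  At module init, a slot is reserved for each deferred MSR:
 *
 *	tsc_aux_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
 *
 * Before entering the guest, the guest value is loaded and the user-return
 * notifier is armed:
 *
 *	kvm_set_user_return_msr(tsc_aux_slot, guest_val, -1ull);
 *
 * When the CPU eventually returns to userspace, kvm_on_user_return() runs
 * via the notifier and writes back each slot's host value, so the expensive
 * WRMSRs are skipped for exits that stay in the kernel.  The real call sites
 * live in the vendor modules; "tsc_aux_slot" and "guest_val" above are
 * made-up names used only for illustration.
 */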
#define KVM_MAX_NR_USER_RETURN_MSRS 16

struct kvm_user_return_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_user_return_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_MAX_NR_USER_RETURN_MSRS];
};

u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;

#define KVM_SUPPORTED_XCR0	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)

u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);

bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

bool __read_mostly enable_apicv = true;
EXPORT_SYMBOL_GPL(enable_apicv);

u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
	STATS_DESC_COUNTER(VM, mmu_pte_write),
	STATS_DESC_COUNTER(VM, mmu_pde_zapped),
	STATS_DESC_COUNTER(VM, mmu_flooded),
	STATS_DESC_COUNTER(VM, mmu_recycled),
	STATS_DESC_COUNTER(VM, mmu_cache_miss),
	STATS_DESC_ICOUNTER(VM, mmu_unsync),
	STATS_DESC_ICOUNTER(VM, pages_4k),
	STATS_DESC_ICOUNTER(VM, pages_2m),
	STATS_DESC_ICOUNTER(VM, pages_1g),
	STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
	STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
	STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, pf_taken),
	STATS_DESC_COUNTER(VCPU, pf_fixed),
	STATS_DESC_COUNTER(VCPU, pf_emulate),
	STATS_DESC_COUNTER(VCPU, pf_spurious),
	STATS_DESC_COUNTER(VCPU, pf_fast),
	STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
	STATS_DESC_COUNTER(VCPU, pf_guest),
	STATS_DESC_COUNTER(VCPU, tlb_flush),
	STATS_DESC_COUNTER(VCPU, invlpg),
	STATS_DESC_COUNTER(VCPU, exits),
	STATS_DESC_COUNTER(VCPU, io_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, irq_window_exits),
	STATS_DESC_COUNTER(VCPU, nmi_window_exits),
	STATS_DESC_COUNTER(VCPU, l1d_flush),
	STATS_DESC_COUNTER(VCPU, halt_exits),
	STATS_DESC_COUNTER(VCPU, request_irq_exits),
	STATS_DESC_COUNTER(VCPU, irq_exits),
	STATS_DESC_COUNTER(VCPU, host_state_reload),
	STATS_DESC_COUNTER(VCPU, fpu_reload),
	STATS_DESC_COUNTER(VCPU, insn_emulation),
	STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
	STATS_DESC_COUNTER(VCPU, hypercalls),
	STATS_DESC_COUNTER(VCPU, irq_injections),
	STATS_DESC_COUNTER(VCPU, nmi_injections),
	STATS_DESC_COUNTER(VCPU, req_event),
	STATS_DESC_COUNTER(VCPU, nested_run),
	STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
	STATS_DESC_COUNTER(VCPU, directed_yield_successful),
	STATS_DESC_COUNTER(VCPU, preemption_reported),
	STATS_DESC_COUNTER(VCPU, preemption_other),
	STATS_DESC_IBOOLEAN(VCPU, guest_mode),
	STATS_DESC_COUNTER(VCPU, notify_window_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

u64 __read_mostly host_xcr0;

static struct kmem_cache *x86_emulator_cache;

/*
 * When called, it means the previous get/set msr reached an invalid msr.
 * Return true if we want to ignore/silence this failed msr access.
 */
static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
{
	const char *op = write ? "wrmsr" : "rdmsr";

	if (ignore_msrs) {
		if (report_ignored_msrs)
			kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		/* Mask the error */
		return true;
	} else {
		kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		return false;
	}
}

static struct kmem_cache *kvm_alloc_emulator_cache(void)
{
	unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
	unsigned int size = sizeof(struct x86_emulate_ctxt);

	return kmem_cache_create_usercopy("x86_emulator", size,
					  __alignof__(struct x86_emulate_ctxt),
					  SLAB_ACCOUNT, useroffset,
					  size - useroffset, NULL);
}

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_user_return_msrs *msrs
		= container_of(urn, struct kvm_user_return_msrs, urn);
	struct kvm_user_return_msr_values *values;
	unsigned long flags;

	/*
	 * Disabling irqs at this point since the following code could be
	 * interrupted and executed through kvm_arch_hardware_disable()
	 */
	local_irq_save(flags);
	if (msrs->registered) {
		msrs->registered = false;
		user_return_notifier_unregister(urn);
	}
	local_irq_restore(flags);
	for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
		values = &msrs->values[slot];
		if (values->host != values->curr) {
			wrmsrl(kvm_uret_msrs_list[slot], values->host);
			values->curr = values->host;
		}
	}
}

static int kvm_probe_user_return_msr(u32 msr)
{
	u64 val;
	int ret;

	preempt_disable();
	ret = rdmsrl_safe(msr, &val);
	if (ret)
		goto out;
	ret = wrmsrl_safe(msr, val);
out:
	preempt_enable();
	return ret;
}

int kvm_add_user_return_msr(u32 msr)
{
	BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);

	if (kvm_probe_user_return_msr(msr))
		return -1;

	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
	return kvm_nr_uret_msrs++;
}
EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);

int kvm_find_user_return_msr(u32 msr)
{
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		if (kvm_uret_msrs_list[i] == msr)
			return i;
	}
	return -1;
}
EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);

static void kvm_user_return_msr_cpu_online(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	u64 value;
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		rdmsrl_safe(kvm_uret_msrs_list[i], &value);
		msrs->values[i].host = value;
		msrs->values[i].curr = value;
	}
}

int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	int err;

	value = (value & mask) | (msrs->values[slot].host & ~mask);
	if (value == msrs->values[slot].curr)
		return 0;
	err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
	if (err)
		return 1;

	msrs->values[slot].curr = value;
	if (!msrs->registered) {
		msrs->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&msrs->urn);
		msrs->registered = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);

static void drop_user_return_notifiers(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);

	if (msrs->registered)
		kvm_on_user_return(&msrs->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
	return kvm_apic_mode(kvm_get_apic_base(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_apic_mode);

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
	enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
	u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);

	if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
		return 1;
	if (!msr_info->host_initiated) {
		if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
			return 1;
		if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
			return 1;
	}

	kvm_lapic_set_base(vcpu, msr_info->data);
	kvm_recalculate_apic_map(vcpu->kvm);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

/*
 * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
 *
 * Hardware virtualization extension instructions may fault if a reboot turns
 * off virtualization while processes are running.  Usually after catching the
 * fault we just panic; during reboot instead the instruction is ignored.
 */
noinstr void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

#define EXCPT_FAULT		0
#define EXCPT_TRAP		1
#define EXCPT_ABORT		2
#define EXCPT_INTERRUPT		3

static int exception_type(int vector)
{
	unsigned int mask;

	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
		return EXCPT_INTERRUPT;

	mask = 1 << vector;

	/* #DB is trap, as instruction watchpoints are handled elsewhere */
	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
		return EXCPT_TRAP;

	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
		return EXCPT_ABORT;

	/* Reserved exceptions will result in fault */
	return EXCPT_FAULT;
}

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
{
	unsigned nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (!has_payload)
		return;

	switch (nr) {
	case DB_VECTOR:
		/*
		 * "Certain debug exceptions may clear bit 0-3.  The
		 * remaining contents of the DR6 register are never
		 * cleared by the processor".
		 */
		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
		/*
		 * In order to reflect the #DB exception payload in guest
		 * dr6, three components need to be considered: active low
		 * bit, FIXED_1 bits and active high bits (e.g. DR6_BD,
		 * DR6_BS and DR6_BT)
		 * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits.
		 * In the target guest dr6:
		 * FIXED_1 bits should always be set.
		 * Active low bits should be cleared if 1-setting in payload.
		 * Active high bits should be set if 1-setting in payload.
		 *
		 * Note, the payload is compatible with the pending debug
		 * exceptions/exit qualification under VMX, that active_low bits
		 * are active high in payload.
		 * So they need to be flipped for DR6.
		 */
		vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
		vcpu->arch.dr6 |= payload;
		vcpu->arch.dr6 ^= payload & DR6_ACTIVE_LOW;

		/*
		 * The #DB payload is defined as compatible with the 'pending
		 * debug exceptions' field under VMX, not DR6.  While bit 12 is
		 * defined in the 'pending debug exceptions' field (enabled
		 * breakpoint), it is reserved and must be zero in DR6.
		 */
		vcpu->arch.dr6 &= ~BIT(12);
		break;
	case PF_VECTOR:
		vcpu->arch.cr2 = payload;
		break;
	}

	vcpu->arch.exception.has_payload = false;
	vcpu->arch.exception.payload = 0;
}
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
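/*
 * Worked example (illustrative only, not from the original source): take a
 * single-step #DB whose payload has DR6_BS set, using the "pending debug
 * exceptions" encoding in which active-low bits are reported active-high.
 * Starting from the guest's dr6 with DR_TRAP_BITS already cleared:
 *
 *	dr6 |= DR6_ACTIVE_LOW;		  (FIXED_1 and active-low bits set)
 *	dr6 |= payload;			  (sets DR6_BS)
 *	dr6 ^= payload & DR6_ACTIVE_LOW;  (payload has no active-low bits,
 *					   so e.g. DR6_RTM stays 1 = "not RTM")
 *
 * Had the payload also set DR6_RTM (i.e. the #DB hit inside an RTM region),
 * the final XOR would clear DR6_RTM in dr6, matching the active-low encoding
 * the architecture expects in the real register.
 */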
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool has_payload, unsigned long payload, bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
	queue:
		if (reinject) {
			/*
			 * On vmentry, vcpu->arch.exception.pending is only
			 * true if an event injection was blocked by
			 * nested_run_pending.  In that case, however,
			 * vcpu_enter_guest requests an immediate exit,
			 * and the guest shouldn't proceed far enough to
			 * need reinjection.
			 */
			WARN_ON_ONCE(vcpu->arch.exception.pending);
			vcpu->arch.exception.injected = true;
			if (WARN_ON_ONCE(has_payload)) {
				/*
				 * A reinjected event has already
				 * delivered its payload.
				 */
				has_payload = false;
				payload = 0;
			}
		} else {
			vcpu->arch.exception.pending = true;
			vcpu->arch.exception.injected = false;
		}
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.has_payload = has_payload;
		vcpu->arch.exception.payload = payload;
		if (!is_guest_mode(vcpu))
			kvm_deliver_exception_payload(vcpu);
		return;
	}

	/* to check exception */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/*
		 * Generate double fault per SDM Table 5-5.  Set
		 * exception.pending = true so that the double fault
		 * can trigger a nested vmexit.
		 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.injected = false;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
		vcpu->arch.exception.has_payload = false;
		vcpu->arch.exception.payload = 0;
	} else
		/*
		 * Replace the previous exception with a new one in the hope
		 * that instruction re-execution will regenerate the lost
		 * exception.
		 */
		goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
			   unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_p);

static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
				    u32 error_code, unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, true, error_code,
			       true, payload, false);
}

int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		return kvm_skip_emulated_instruction(vcpu);

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
				       EMULTYPE_COMPLETE_USER_EXIT);
}

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.exception.nested_apf =
		is_guest_mode(vcpu) && fault->async_page_fault;
	if (vcpu->arch.exception.nested_apf) {
		vcpu->arch.apf.nested_apf_token = fault->address;
		kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
	} else {
		kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
					fault->address);
	}
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

/* Returns true if the page fault was immediately morphed into a VM-Exit. */
bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
				    struct x86_exception *fault)
{
	struct kvm_mmu *fault_mmu;
	WARN_ON_ONCE(fault->vector != PF_VECTOR);

	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
					       vcpu->arch.walk_mmu;

	/*
	 * Invalidate the TLB entry for the faulting address, if it exists,
	 * else the access will fault indefinitely (and to emulate hardware).
	 */
	if ((fault->error_code & PFERR_PRESENT_MASK) &&
	    !(fault->error_code & PFERR_RSVD_MASK))
		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
				       fault_mmu->root.hpa);

	/*
	 * A workaround for KVM's bad exception handling.  If KVM injected an
	 * exception into L2, and L2 encountered a #PF while vectoring the
	 * injected exception, manually check to see if L1 wants to intercept
	 * #PF, otherwise queuing the #PF will lead to #DF or a lost exception.
	 * In all other cases, defer the check to nested_ops->check_events(),
	 * which will correctly handle priority (this does not).  Note, other
	 * exceptions, e.g. #GP, are theoretically affected, #PF is simply the
	 * most problematic, e.g. when L0 and L1 are both intercepting #PF for
	 * shadow paging.
	 *
	 * TODO: Rewrite exception handling to track injected and pending
	 *       (VM-Exit) exceptions separately.
	 */
	if (unlikely(vcpu->arch.exception.injected && is_guest_mode(vcpu)) &&
	    kvm_x86_ops.nested_ops->handle_page_fault_workaround(vcpu, fault))
		return true;

	fault_mmu->inject_page_fault(vcpu, fault);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);

static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
}

/*
 * Load the pae pdptrs.  Return 1 if they are all valid, 0 otherwise.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	gpa_t real_gpa;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	/*
	 * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
	 * to an L1 GPA.
	 */
	real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
				     PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
	if (real_gpa == INVALID_GPA)
		return 0;

	/* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */
	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
				       cr3 & GENMASK(11, 5), sizeof(pdpte));
	if (ret < 0)
		return 0;

	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & PT_PRESENT_MASK) &&
		    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
			return 0;
		}
	}

	/*
	 * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
	 * Shadow page roots need to be reconstructed instead.
	 */
	if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
		kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
	vcpu->arch.pdptrs_from_userspace = false;

	return 1;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);

		/*
		 * Clearing CR0.PG is defined to flush the TLB from the guest's
		 * perspective.
		 */
		if (!(cr0 & X86_CR0_PG))
			kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
	}

	if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr0);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

#ifdef CONFIG_X86_64
	if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
	    (cr0 & X86_CR0_PG)) {
		int cs_db, cs_l;

		if (!is_pae(vcpu))
			return 1;
		static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
		if (cs_l)
			return 1;
	}
#endif
	if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
	    is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
	    !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
		return 1;

	if (!(cr0 & X86_CR0_PG) &&
	    (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
		return 1;

	static_call(kvm_x86_set_cr0)(vcpu, cr0);

	kvm_post_set_cr0(vcpu, old_cr0, cr0);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
	}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    vcpu->arch.pkru != vcpu->arch.host_pkru &&
	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
	     kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
		write_pkru(vcpu->arch.pkru);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);

void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
	     kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
		vcpu->arch.pkru = rdpkru();
		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
			write_pkru(vcpu->arch.host_pkru);
	}
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, host_xss);
	}

}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);

static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
}

#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
{
	return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
}
#endif
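/*
 * Illustrative note (not part of the original code): __kvm_set_xcr() below
 * enforces the architectural consistency rules for XCR0.  Assuming the guest
 * supports the relevant features, for example:
 *
 *	0x0000000000000003	FP | SSE			-> accepted
 *	0x0000000000000002	SSE without FP			-> rejected
 *	0x0000000000000005	FP | YMM without SSE		-> rejected
 *	0x00000000000000e7	FP|SSE|YMM + all AVX-512 bits	-> accepted
 *	0x0000000000000027	only one AVX-512 component	-> rejected
 *
 * Bits outside the guest's supported XCR0 are likewise rejected, and a
 * rejected value makes kvm_emulate_xsetbv() inject #GP.
 */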
static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0 = xcr;
	u64 old_xcr0 = vcpu->arch.xcr0;
	u64 valid_bits;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	if (!(xcr0 & XFEATURE_MASK_FP))
		return 1;
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
		return 1;

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving.  However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
	 */
	valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP;
	if (xcr0 & ~valid_bits)
		return 1;

	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
		return 1;

	if (xcr0 & XFEATURE_MASK_AVX512) {
		if (!(xcr0 & XFEATURE_MASK_YMM))
			return 1;
		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
			return 1;
	}

	if ((xcr0 & XFEATURE_MASK_XTILE) &&
	    ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE))
		return 1;

	vcpu->arch.xcr0 = xcr0;

	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
		kvm_update_cpuid_runtime(vcpu);
	return 0;
}

int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);

bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & cr4_reserved_bits)
		return false;

	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);

static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	return __kvm_is_valid_cr4(vcpu, cr4) &&
	       static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
}

void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{
	if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	/*
	 * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB
	 * according to the SDM; however, stale prev_roots could be reused
	 * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we
	 * free them all.  This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST
	 * or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not flushed,
	 * so fall through.
	 */
	if (!tdp_enabled &&
	    (cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE))
		kvm_mmu_unload(vcpu);

	/*
	 * The TLB has to be flushed for all PCIDs if any of the following
	 * (architecturally required) changes happen:
	 * - CR4.PCIDE is changed from 1 to 0
	 * - CR4.PGE is toggled
	 *
	 * This is a superset of KVM_REQ_TLB_FLUSH_CURRENT.
	 */
	if (((cr4 ^ old_cr4) & X86_CR4_PGE) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);

	/*
	 * The TLB has to be flushed for the current PCID if any of the
	 * following (architecturally required) changes happen:
	 * - CR4.SMEP is changed from 0 to 1
	 * - CR4.PAE is toggled
	 */
	else if (((cr4 ^ old_cr4) & X86_CR4_PAE) ||
		 ((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP)))
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);

}
EXPORT_SYMBOL_GPL(kvm_post_set_cr4);

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);

	if (!kvm_is_valid_cr4(vcpu, cr4))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
		if ((cr4 ^ old_cr4) & X86_CR4_LA57)
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
		   && !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
			return 1;

		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	static_call(kvm_x86_set_cr4)(vcpu, cr4);

	kvm_post_set_cr4(vcpu, old_cr4, cr4);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	unsigned long roots_to_free = 0;
	int i;

	/*
	 * MOV CR3 and INVPCID are usually not intercepted when using TDP, but
	 * this is reachable when running EPT=1 and unrestricted_guest=0, and
	 * also via the emulator.  KVM's TDP page tables are not in the scope of
	 * the invalidation, but the guest's TLB entries need to be flushed as
	 * the CPU may have cached entries in its TLB for the target PCID.
	 */
	if (unlikely(tdp_enabled)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/*
	 * If neither the current CR3 nor any of the prev_roots use the given
	 * PCID, then nothing needs to be done here because a resync will
	 * happen anyway before switching to any other CR3.
	 */
	if (kvm_get_active_pcid(vcpu) == pcid) {
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/*
	 * If PCID is disabled, there is no need to free prev_roots even if the
	 * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
	 * with PCIDE=0.
	 */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);

	kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
}

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	bool skip_tlb_flush = false;
	unsigned long pcid = 0;
#ifdef CONFIG_X86_64
	bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);

	if (pcid_enabled) {
		skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
		cr3 &= ~X86_CR3_PCID_NOFLUSH;
		pcid = cr3 & X86_CR3_PCID_MASK;
	}
#endif

	/* PDPTRs are always reloaded for PAE paging. */
	if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
		goto handle_tlb_flush;

	/*
	 * Do not condition the GPA check on long mode, this helper is used to
	 * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
	 * the current vCPU mode is accurate.
	 */
	if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
		return 1;

	if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
		return 1;

	if (cr3 != kvm_read_cr3(vcpu))
		kvm_mmu_new_pgd(vcpu, cr3);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
	/* Do not call post_set_cr3, we do not get here for confidential guests.  */

handle_tlb_flush:
	/*
	 * A load of CR3 that flushes the TLB flushes only the current PCID,
	 * even if PCID is disabled, in which case PCID=0 is flushed.  It's a
	 * moot point in the end because _disabling_ PCID will flush all PCIDs,
	 * and it's impossible to use a non-zero PCID when PCID is disabled,
	 * i.e. only PCID=0 can be relevant.
	 */
	if (!skip_tlb_flush)
		kvm_invalidate_pcid(vcpu, pcid);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
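/*
 * Worked example (illustrative, not part of the original code): with
 * CR4.PCIDE=1, a 64-bit guest MOV to CR3 of 0x800000000012a001 decomposes in
 * kvm_set_cr3() above as
 *
 *	bit 63 (X86_CR3_PCID_NOFLUSH)	-> skip_tlb_flush = true
 *	bits 11:0 (X86_CR3_PCID_MASK)	-> pcid = 0x001
 *	remaining bits			-> page-table base GPA 0x12a000
 *
 * so kvm_invalidate_pcid() is only called for PCID 0x001 when the NOFLUSH
 * hint is clear.
 */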
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (lapic_in_kernel(vcpu))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
	int i;

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
	}
}

void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	static_call(kvm_x86_set_dr7)(vcpu, dr7);
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
	if (dr7 & DR7_BP_EN_MASK)
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_update_dr7);

static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
	u64 fixed = DR6_FIXED_1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
		fixed |= DR6_RTM;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
		fixed |= DR6_BUS_LOCK;
	return fixed;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[array_index_nospec(dr, size)] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
	case 6:
		if (!kvm_dr6_valid(val))
			return 1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
		break;
	case 5:
	default: /* 7 */
		if (!kvm_dr7_valid(val))
			return 1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[array_index_nospec(dr, size)];
		break;
	case 4:
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_rcx_read(vcpu);
	u64 data;

	if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	kvm_rax_write(vcpu, (u32)data);
	kvm_rdx_write(vcpu, data >> 32);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features)
 * extract the supported MSRs from the related const lists.
 * msrs_to_save is selected from the msrs_to_save_all to reflect the
 * capabilities of the host cpu.  This capabilities test skips MSRs that are
 * kvm-specific.  Those are put in emulated_msrs_all; filtering of emulated_msrs
 * may depend on host virtualization features rather than host cpu features.
 */
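/*
 * Illustrative note (not part of the original code): the writable
 * msrs_to_save[], emulated_msrs[] and msr_based_features[] arrays declared
 * below start out empty and are presumably populated later, outside this
 * excerpt, by probing each entry of the corresponding *_all[] list on the
 * host, so that only MSRs the host can actually handle are reported via
 * KVM_GET_MSR_INDEX_LIST and KVM_GET_MSR_FEATURE_INDEX_LIST.
 */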
1400 * msrs_to_save is selected from the msrs_to_save_all to reflect the 1401 * capabilities of the host cpu. This capabilities test skips MSRs that are 1402 * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs 1403 * may depend on host virtualization features rather than host cpu features. 1404 */ 1405 1406 static const u32 msrs_to_save_all[] = { 1407 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, 1408 MSR_STAR, 1409 #ifdef CONFIG_X86_64 1410 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, 1411 #endif 1412 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, 1413 MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, 1414 MSR_IA32_SPEC_CTRL, 1415 MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, 1416 MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, 1417 MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, 1418 MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B, 1419 MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, 1420 MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, 1421 MSR_IA32_UMWAIT_CONTROL, 1422 1423 MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, 1424 MSR_ARCH_PERFMON_FIXED_CTR0 + 2, 1425 MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, 1426 MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 1427 MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, 1428 MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3, 1429 MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5, 1430 MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7, 1431 MSR_ARCH_PERFMON_PERFCTR0 + 8, MSR_ARCH_PERFMON_PERFCTR0 + 9, 1432 MSR_ARCH_PERFMON_PERFCTR0 + 10, MSR_ARCH_PERFMON_PERFCTR0 + 11, 1433 MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13, 1434 MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15, 1435 MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17, 1436 MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1, 1437 MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3, 1438 MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5, 1439 MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7, 1440 MSR_ARCH_PERFMON_EVENTSEL0 + 8, MSR_ARCH_PERFMON_EVENTSEL0 + 9, 1441 MSR_ARCH_PERFMON_EVENTSEL0 + 10, MSR_ARCH_PERFMON_EVENTSEL0 + 11, 1442 MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13, 1443 MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15, 1444 MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17, 1445 MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG, 1446 1447 MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3, 1448 MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3, 1449 MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2, 1450 MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5, 1451 MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2, 1452 MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5, 1453 MSR_IA32_XFD, MSR_IA32_XFD_ERR, 1454 }; 1455 1456 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)]; 1457 static unsigned num_msrs_to_save; 1458 1459 static const u32 emulated_msrs_all[] = { 1460 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, 1461 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, 1462 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, 1463 HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, 1464 HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY, 1465 HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, 1466 
HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, 1467 HV_X64_MSR_RESET, 1468 HV_X64_MSR_VP_INDEX, 1469 HV_X64_MSR_VP_RUNTIME, 1470 HV_X64_MSR_SCONTROL, 1471 HV_X64_MSR_STIMER0_CONFIG, 1472 HV_X64_MSR_VP_ASSIST_PAGE, 1473 HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL, 1474 HV_X64_MSR_TSC_EMULATION_STATUS, 1475 HV_X64_MSR_SYNDBG_OPTIONS, 1476 HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS, 1477 HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER, 1478 HV_X64_MSR_SYNDBG_PENDING_BUFFER, 1479 1480 MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, 1481 MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK, 1482 1483 MSR_IA32_TSC_ADJUST, 1484 MSR_IA32_TSC_DEADLINE, 1485 MSR_IA32_ARCH_CAPABILITIES, 1486 MSR_IA32_PERF_CAPABILITIES, 1487 MSR_IA32_MISC_ENABLE, 1488 MSR_IA32_MCG_STATUS, 1489 MSR_IA32_MCG_CTL, 1490 MSR_IA32_MCG_EXT_CTL, 1491 MSR_IA32_SMBASE, 1492 MSR_SMI_COUNT, 1493 MSR_PLATFORM_INFO, 1494 MSR_MISC_FEATURES_ENABLES, 1495 MSR_AMD64_VIRT_SPEC_CTRL, 1496 MSR_AMD64_TSC_RATIO, 1497 MSR_IA32_POWER_CTL, 1498 MSR_IA32_UCODE_REV, 1499 1500 /* 1501 * The following list leaves out MSRs whose values are determined 1502 * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs. 1503 * We always support the "true" VMX control MSRs, even if the host 1504 * processor does not, so I am putting these registers here rather 1505 * than in msrs_to_save_all. 1506 */ 1507 MSR_IA32_VMX_BASIC, 1508 MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1509 MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1510 MSR_IA32_VMX_TRUE_EXIT_CTLS, 1511 MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1512 MSR_IA32_VMX_MISC, 1513 MSR_IA32_VMX_CR0_FIXED0, 1514 MSR_IA32_VMX_CR4_FIXED0, 1515 MSR_IA32_VMX_VMCS_ENUM, 1516 MSR_IA32_VMX_PROCBASED_CTLS2, 1517 MSR_IA32_VMX_EPT_VPID_CAP, 1518 MSR_IA32_VMX_VMFUNC, 1519 1520 MSR_K7_HWCR, 1521 MSR_KVM_POLL_CONTROL, 1522 }; 1523 1524 static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; 1525 static unsigned num_emulated_msrs; 1526 1527 /* 1528 * List of msr numbers which are used to expose MSR-based features that 1529 * can be used by a hypervisor to validate requested CPU features. 1530 */ 1531 static const u32 msr_based_features_all[] = { 1532 MSR_IA32_VMX_BASIC, 1533 MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1534 MSR_IA32_VMX_PINBASED_CTLS, 1535 MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1536 MSR_IA32_VMX_PROCBASED_CTLS, 1537 MSR_IA32_VMX_TRUE_EXIT_CTLS, 1538 MSR_IA32_VMX_EXIT_CTLS, 1539 MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1540 MSR_IA32_VMX_ENTRY_CTLS, 1541 MSR_IA32_VMX_MISC, 1542 MSR_IA32_VMX_CR0_FIXED0, 1543 MSR_IA32_VMX_CR0_FIXED1, 1544 MSR_IA32_VMX_CR4_FIXED0, 1545 MSR_IA32_VMX_CR4_FIXED1, 1546 MSR_IA32_VMX_VMCS_ENUM, 1547 MSR_IA32_VMX_PROCBASED_CTLS2, 1548 MSR_IA32_VMX_EPT_VPID_CAP, 1549 MSR_IA32_VMX_VMFUNC, 1550 1551 MSR_F10H_DECFG, 1552 MSR_IA32_UCODE_REV, 1553 MSR_IA32_ARCH_CAPABILITIES, 1554 MSR_IA32_PERF_CAPABILITIES, 1555 }; 1556 1557 static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)]; 1558 static unsigned int num_msr_based_features; 1559 1560 static u64 kvm_get_arch_capabilities(void) 1561 { 1562 u64 data = 0; 1563 1564 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) 1565 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data); 1566 1567 /* 1568 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that 1569 * the nested hypervisor runs with NX huge pages. If it is not, 1570 * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other 1571 * L1 guests, so it need not worry about its own (L2) guests. 
1572 */ 1573 data |= ARCH_CAP_PSCHANGE_MC_NO; 1574 1575 /* 1576 * If we're doing cache flushes (either "always" or "cond") 1577 * we will do one whenever the guest does a vmlaunch/vmresume. 1578 * If an outer hypervisor is doing the cache flush for us 1579 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that 1580 * capability to the guest too, and if EPT is disabled we're not 1581 * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will 1582 * require a nested hypervisor to do a flush of its own. 1583 */ 1584 if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER) 1585 data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH; 1586 1587 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) 1588 data |= ARCH_CAP_RDCL_NO; 1589 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 1590 data |= ARCH_CAP_SSB_NO; 1591 if (!boot_cpu_has_bug(X86_BUG_MDS)) 1592 data |= ARCH_CAP_MDS_NO; 1593 1594 if (!boot_cpu_has(X86_FEATURE_RTM)) { 1595 /* 1596 * If RTM=0 because the kernel has disabled TSX, the host might 1597 * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0 1598 * and therefore knows that there cannot be TAA) but keep 1599 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts, 1600 * and we want to allow migrating those guests to tsx=off hosts. 1601 */ 1602 data &= ~ARCH_CAP_TAA_NO; 1603 } else if (!boot_cpu_has_bug(X86_BUG_TAA)) { 1604 data |= ARCH_CAP_TAA_NO; 1605 } else { 1606 /* 1607 * Nothing to do here; we emulate TSX_CTRL if present on the 1608 * host so the guest can choose between disabling TSX or 1609 * using VERW to clear CPU buffers. 1610 */ 1611 } 1612 1613 /* Guests don't need to know "Fill buffer clear control" exists */ 1614 data &= ~ARCH_CAP_FB_CLEAR_CTRL; 1615 1616 return data; 1617 } 1618 1619 static int kvm_get_msr_feature(struct kvm_msr_entry *msr) 1620 { 1621 switch (msr->index) { 1622 case MSR_IA32_ARCH_CAPABILITIES: 1623 msr->data = kvm_get_arch_capabilities(); 1624 break; 1625 case MSR_IA32_UCODE_REV: 1626 rdmsrl_safe(msr->index, &msr->data); 1627 break; 1628 default: 1629 return static_call(kvm_x86_get_msr_feature)(msr); 1630 } 1631 return 0; 1632 } 1633 1634 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 1635 { 1636 struct kvm_msr_entry msr; 1637 int r; 1638 1639 msr.index = index; 1640 r = kvm_get_msr_feature(&msr); 1641 1642 if (r == KVM_MSR_RET_INVALID) { 1643 /* Unconditionally clear the output for simplicity */ 1644 *data = 0; 1645 if (kvm_msr_ignored_check(index, 0, false)) 1646 r = 0; 1647 } 1648 1649 if (r) 1650 return r; 1651 1652 *data = msr.data; 1653 1654 return 0; 1655 } 1656 1657 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) 1658 { 1659 if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) 1660 return false; 1661 1662 if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) 1663 return false; 1664 1665 if (efer & (EFER_LME | EFER_LMA) && 1666 !guest_cpuid_has(vcpu, X86_FEATURE_LM)) 1667 return false; 1668 1669 if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX)) 1670 return false; 1671 1672 return true; 1673 1674 } 1675 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) 1676 { 1677 if (efer & efer_reserved_bits) 1678 return false; 1679 1680 return __kvm_valid_efer(vcpu, efer); 1681 } 1682 EXPORT_SYMBOL_GPL(kvm_valid_efer); 1683 1684 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1685 { 1686 u64 old_efer = vcpu->arch.efer; 1687 u64 efer = msr_info->data; 1688 int r; 1689 1690 if (efer & efer_reserved_bits) 1691 return 1; 1692 1693 if 
(!msr_info->host_initiated) { 1694 if (!__kvm_valid_efer(vcpu, efer)) 1695 return 1; 1696 1697 if (is_paging(vcpu) && 1698 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) 1699 return 1; 1700 } 1701 1702 efer &= ~EFER_LMA; 1703 efer |= vcpu->arch.efer & EFER_LMA; 1704 1705 r = static_call(kvm_x86_set_efer)(vcpu, efer); 1706 if (r) { 1707 WARN_ON(r > 0); 1708 return r; 1709 } 1710 1711 if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS) 1712 kvm_mmu_reset_context(vcpu); 1713 1714 return 0; 1715 } 1716 1717 void kvm_enable_efer_bits(u64 mask) 1718 { 1719 efer_reserved_bits &= ~mask; 1720 } 1721 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); 1722 1723 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type) 1724 { 1725 struct kvm_x86_msr_filter *msr_filter; 1726 struct msr_bitmap_range *ranges; 1727 struct kvm *kvm = vcpu->kvm; 1728 bool allowed; 1729 int idx; 1730 u32 i; 1731 1732 /* x2APIC MSRs do not support filtering. */ 1733 if (index >= 0x800 && index <= 0x8ff) 1734 return true; 1735 1736 idx = srcu_read_lock(&kvm->srcu); 1737 1738 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); 1739 if (!msr_filter) { 1740 allowed = true; 1741 goto out; 1742 } 1743 1744 allowed = msr_filter->default_allow; 1745 ranges = msr_filter->ranges; 1746 1747 for (i = 0; i < msr_filter->count; i++) { 1748 u32 start = ranges[i].base; 1749 u32 end = start + ranges[i].nmsrs; 1750 u32 flags = ranges[i].flags; 1751 unsigned long *bitmap = ranges[i].bitmap; 1752 1753 if ((index >= start) && (index < end) && (flags & type)) { 1754 allowed = !!test_bit(index - start, bitmap); 1755 break; 1756 } 1757 } 1758 1759 out: 1760 srcu_read_unlock(&kvm->srcu, idx); 1761 1762 return allowed; 1763 } 1764 EXPORT_SYMBOL_GPL(kvm_msr_allowed); 1765 1766 /* 1767 * Write @data into the MSR specified by @index. Select MSR specific fault 1768 * checks are bypassed if @host_initiated is %true. 1769 * Returns 0 on success, non-0 otherwise. 1770 * Assumes vcpu_load() was already called. 1771 */ 1772 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, 1773 bool host_initiated) 1774 { 1775 struct msr_data msr; 1776 1777 switch (index) { 1778 case MSR_FS_BASE: 1779 case MSR_GS_BASE: 1780 case MSR_KERNEL_GS_BASE: 1781 case MSR_CSTAR: 1782 case MSR_LSTAR: 1783 if (is_noncanonical_address(data, vcpu)) 1784 return 1; 1785 break; 1786 case MSR_IA32_SYSENTER_EIP: 1787 case MSR_IA32_SYSENTER_ESP: 1788 /* 1789 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if 1790 * non-canonical address is written on Intel but not on 1791 * AMD (which ignores the top 32-bits, because it does 1792 * not implement 64-bit SYSENTER). 1793 * 1794 * 64-bit code should hence be able to write a non-canonical 1795 * value on AMD. Making the address canonical ensures that 1796 * vmentry does not fail on Intel after writing a non-canonical 1797 * value, and that something deterministic happens if the guest 1798 * invokes 64-bit SYSENTER. 1799 */ 1800 data = __canonical_address(data, vcpu_virt_addr_bits(vcpu)); 1801 break; 1802 case MSR_TSC_AUX: 1803 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) 1804 return 1; 1805 1806 if (!host_initiated && 1807 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && 1808 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) 1809 return 1; 1810 1811 /* 1812 * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has 1813 * incomplete and conflicting architectural behavior. Current 1814 * AMD CPUs completely ignore bits 63:32, i.e. they aren't 1815 * reserved and always read as zeros. 
Enforce Intel's reserved 1816 * bits check if and only if the guest CPU is Intel, and clear 1817 * the bits in all other cases. This ensures cross-vendor 1818 * migration will provide consistent behavior for the guest. 1819 */ 1820 if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0) 1821 return 1; 1822 1823 data = (u32)data; 1824 break; 1825 } 1826 1827 msr.data = data; 1828 msr.index = index; 1829 msr.host_initiated = host_initiated; 1830 1831 return static_call(kvm_x86_set_msr)(vcpu, &msr); 1832 } 1833 1834 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, 1835 u32 index, u64 data, bool host_initiated) 1836 { 1837 int ret = __kvm_set_msr(vcpu, index, data, host_initiated); 1838 1839 if (ret == KVM_MSR_RET_INVALID) 1840 if (kvm_msr_ignored_check(index, data, true)) 1841 ret = 0; 1842 1843 return ret; 1844 } 1845 1846 /* 1847 * Read the MSR specified by @index into @data. Select MSR specific fault 1848 * checks are bypassed if @host_initiated is %true. 1849 * Returns 0 on success, non-0 otherwise. 1850 * Assumes vcpu_load() was already called. 1851 */ 1852 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, 1853 bool host_initiated) 1854 { 1855 struct msr_data msr; 1856 int ret; 1857 1858 switch (index) { 1859 case MSR_TSC_AUX: 1860 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) 1861 return 1; 1862 1863 if (!host_initiated && 1864 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && 1865 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) 1866 return 1; 1867 break; 1868 } 1869 1870 msr.index = index; 1871 msr.host_initiated = host_initiated; 1872 1873 ret = static_call(kvm_x86_get_msr)(vcpu, &msr); 1874 if (!ret) 1875 *data = msr.data; 1876 return ret; 1877 } 1878 1879 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, 1880 u32 index, u64 *data, bool host_initiated) 1881 { 1882 int ret = __kvm_get_msr(vcpu, index, data, host_initiated); 1883 1884 if (ret == KVM_MSR_RET_INVALID) { 1885 /* Unconditionally clear *data for simplicity */ 1886 *data = 0; 1887 if (kvm_msr_ignored_check(index, 0, false)) 1888 ret = 0; 1889 } 1890 1891 return ret; 1892 } 1893 1894 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1895 { 1896 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) 1897 return KVM_MSR_RET_FILTERED; 1898 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1899 } 1900 1901 static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data) 1902 { 1903 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) 1904 return KVM_MSR_RET_FILTERED; 1905 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1906 } 1907 1908 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1909 { 1910 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1911 } 1912 EXPORT_SYMBOL_GPL(kvm_get_msr); 1913 1914 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) 1915 { 1916 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1917 } 1918 EXPORT_SYMBOL_GPL(kvm_set_msr); 1919 1920 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu) 1921 { 1922 if (!vcpu->run->msr.error) { 1923 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); 1924 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); 1925 } 1926 } 1927 1928 static int complete_emulated_msr_access(struct kvm_vcpu *vcpu) 1929 { 1930 return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error); 1931 } 1932 1933 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) 1934 { 1935 complete_userspace_rdmsr(vcpu); 1936 return complete_emulated_msr_access(vcpu); 1937 } 
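/*
 * Illustrative sketch (not compiled): how a userspace VMM might service the
 * KVM_EXIT_X86_RDMSR exit produced by kvm_msr_user_space() below.  On the
 * next KVM_RUN, complete_userspace_rdmsr() above consumes run->msr.error and
 * run->msr.data.  vmm_rdmsr() is a hypothetical VMM helper, not a KVM or
 * kernel API.
 */
#if 0
static void vmm_handle_rdmsr_exit(struct kvm_run *run)
{
	u64 value;

	if (vmm_rdmsr(run->msr.index, &value)) {
		run->msr.error = 1;	/* KVM injects #GP on re-entry */
	} else {
		run->msr.data = value;	/* loaded into EDX:EAX by KVM */
		run->msr.error = 0;
	}
	/* the VMM then re-enters the vCPU with ioctl(vcpu_fd, KVM_RUN, 0) */
}
#endif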
1938 1939 static int complete_fast_msr_access(struct kvm_vcpu *vcpu) 1940 { 1941 return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); 1942 } 1943 1944 static int complete_fast_rdmsr(struct kvm_vcpu *vcpu) 1945 { 1946 complete_userspace_rdmsr(vcpu); 1947 return complete_fast_msr_access(vcpu); 1948 } 1949 1950 static u64 kvm_msr_reason(int r) 1951 { 1952 switch (r) { 1953 case KVM_MSR_RET_INVALID: 1954 return KVM_MSR_EXIT_REASON_UNKNOWN; 1955 case KVM_MSR_RET_FILTERED: 1956 return KVM_MSR_EXIT_REASON_FILTER; 1957 default: 1958 return KVM_MSR_EXIT_REASON_INVAL; 1959 } 1960 } 1961 1962 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, 1963 u32 exit_reason, u64 data, 1964 int (*completion)(struct kvm_vcpu *vcpu), 1965 int r) 1966 { 1967 u64 msr_reason = kvm_msr_reason(r); 1968 1969 /* Check if the user wanted to know about this MSR fault */ 1970 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) 1971 return 0; 1972 1973 vcpu->run->exit_reason = exit_reason; 1974 vcpu->run->msr.error = 0; 1975 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); 1976 vcpu->run->msr.reason = msr_reason; 1977 vcpu->run->msr.index = index; 1978 vcpu->run->msr.data = data; 1979 vcpu->arch.complete_userspace_io = completion; 1980 1981 return 1; 1982 } 1983 1984 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) 1985 { 1986 u32 ecx = kvm_rcx_read(vcpu); 1987 u64 data; 1988 int r; 1989 1990 r = kvm_get_msr_with_filter(vcpu, ecx, &data); 1991 1992 if (!r) { 1993 trace_kvm_msr_read(ecx, data); 1994 1995 kvm_rax_write(vcpu, data & -1u); 1996 kvm_rdx_write(vcpu, (data >> 32) & -1u); 1997 } else { 1998 /* MSR read failed? See if we should ask user space */ 1999 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0, 2000 complete_fast_rdmsr, r)) 2001 return 0; 2002 trace_kvm_msr_read_ex(ecx); 2003 } 2004 2005 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 2006 } 2007 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); 2008 2009 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) 2010 { 2011 u32 ecx = kvm_rcx_read(vcpu); 2012 u64 data = kvm_read_edx_eax(vcpu); 2013 int r; 2014 2015 r = kvm_set_msr_with_filter(vcpu, ecx, data); 2016 2017 if (!r) { 2018 trace_kvm_msr_write(ecx, data); 2019 } else { 2020 /* MSR write failed? See if we should ask user space */ 2021 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data, 2022 complete_fast_msr_access, r)) 2023 return 0; 2024 /* Signal all other negative errors to userspace */ 2025 if (r < 0) 2026 return r; 2027 trace_kvm_msr_write_ex(ecx, data); 2028 } 2029 2030 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 2031 } 2032 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); 2033 2034 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) 2035 { 2036 return kvm_skip_emulated_instruction(vcpu); 2037 } 2038 EXPORT_SYMBOL_GPL(kvm_emulate_as_nop); 2039 2040 int kvm_emulate_invd(struct kvm_vcpu *vcpu) 2041 { 2042 /* Treat an INVD instruction as a NOP and just skip it. 
*/ 2043 return kvm_emulate_as_nop(vcpu); 2044 } 2045 EXPORT_SYMBOL_GPL(kvm_emulate_invd); 2046 2047 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) 2048 { 2049 kvm_queue_exception(vcpu, UD_VECTOR); 2050 return 1; 2051 } 2052 EXPORT_SYMBOL_GPL(kvm_handle_invalid_op); 2053 2054 2055 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn) 2056 { 2057 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) && 2058 !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT)) 2059 return kvm_handle_invalid_op(vcpu); 2060 2061 pr_warn_once("kvm: %s instruction emulated as NOP!\n", insn); 2062 return kvm_emulate_as_nop(vcpu); 2063 } 2064 int kvm_emulate_mwait(struct kvm_vcpu *vcpu) 2065 { 2066 return kvm_emulate_monitor_mwait(vcpu, "MWAIT"); 2067 } 2068 EXPORT_SYMBOL_GPL(kvm_emulate_mwait); 2069 2070 int kvm_emulate_monitor(struct kvm_vcpu *vcpu) 2071 { 2072 return kvm_emulate_monitor_mwait(vcpu, "MONITOR"); 2073 } 2074 EXPORT_SYMBOL_GPL(kvm_emulate_monitor); 2075 2076 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) 2077 { 2078 xfer_to_guest_mode_prepare(); 2079 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || 2080 xfer_to_guest_mode_work_pending(); 2081 } 2082 2083 /* 2084 * The fast path for frequent and performance sensitive wrmsr emulation, 2085 * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces 2086 * the latency of virtual IPI by avoiding the expensive bits of transitioning 2087 * from guest to host, e.g. reacquiring KVM's SRCU lock. In contrast to the 2088 * other cases which must be called after interrupts are enabled on the host. 2089 */ 2090 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data) 2091 { 2092 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) 2093 return 1; 2094 2095 if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) && 2096 ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) && 2097 ((data & APIC_MODE_MASK) == APIC_DM_FIXED) && 2098 ((u32)(data >> 32) != X2APIC_BROADCAST)) 2099 return kvm_x2apic_icr_write(vcpu->arch.apic, data); 2100 2101 return 1; 2102 } 2103 2104 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data) 2105 { 2106 if (!kvm_can_use_hv_timer(vcpu)) 2107 return 1; 2108 2109 kvm_set_lapic_tscdeadline_msr(vcpu, data); 2110 return 0; 2111 } 2112 2113 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) 2114 { 2115 u32 msr = kvm_rcx_read(vcpu); 2116 u64 data; 2117 fastpath_t ret = EXIT_FASTPATH_NONE; 2118 2119 switch (msr) { 2120 case APIC_BASE_MSR + (APIC_ICR >> 4): 2121 data = kvm_read_edx_eax(vcpu); 2122 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) { 2123 kvm_skip_emulated_instruction(vcpu); 2124 ret = EXIT_FASTPATH_EXIT_HANDLED; 2125 } 2126 break; 2127 case MSR_IA32_TSC_DEADLINE: 2128 data = kvm_read_edx_eax(vcpu); 2129 if (!handle_fastpath_set_tscdeadline(vcpu, data)) { 2130 kvm_skip_emulated_instruction(vcpu); 2131 ret = EXIT_FASTPATH_REENTER_GUEST; 2132 } 2133 break; 2134 default: 2135 break; 2136 } 2137 2138 if (ret != EXIT_FASTPATH_NONE) 2139 trace_kvm_msr_write(msr, data); 2140 2141 return ret; 2142 } 2143 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff); 2144 2145 /* 2146 * Adapt set_msr() to msr_io()'s calling convention 2147 */ 2148 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 2149 { 2150 return kvm_get_msr_ignored_check(vcpu, index, data, true); 2151 } 2152 2153 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 2154 { 2155 return 
kvm_set_msr_ignored_check(vcpu, index, *data, true); 2156 } 2157 2158 #ifdef CONFIG_X86_64 2159 struct pvclock_clock { 2160 int vclock_mode; 2161 u64 cycle_last; 2162 u64 mask; 2163 u32 mult; 2164 u32 shift; 2165 u64 base_cycles; 2166 u64 offset; 2167 }; 2168 2169 struct pvclock_gtod_data { 2170 seqcount_t seq; 2171 2172 struct pvclock_clock clock; /* extract of a clocksource struct */ 2173 struct pvclock_clock raw_clock; /* extract of a clocksource struct */ 2174 2175 ktime_t offs_boot; 2176 u64 wall_time_sec; 2177 }; 2178 2179 static struct pvclock_gtod_data pvclock_gtod_data; 2180 2181 static void update_pvclock_gtod(struct timekeeper *tk) 2182 { 2183 struct pvclock_gtod_data *vdata = &pvclock_gtod_data; 2184 2185 write_seqcount_begin(&vdata->seq); 2186 2187 /* copy pvclock gtod data */ 2188 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; 2189 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; 2190 vdata->clock.mask = tk->tkr_mono.mask; 2191 vdata->clock.mult = tk->tkr_mono.mult; 2192 vdata->clock.shift = tk->tkr_mono.shift; 2193 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; 2194 vdata->clock.offset = tk->tkr_mono.base; 2195 2196 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; 2197 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; 2198 vdata->raw_clock.mask = tk->tkr_raw.mask; 2199 vdata->raw_clock.mult = tk->tkr_raw.mult; 2200 vdata->raw_clock.shift = tk->tkr_raw.shift; 2201 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; 2202 vdata->raw_clock.offset = tk->tkr_raw.base; 2203 2204 vdata->wall_time_sec = tk->xtime_sec; 2205 2206 vdata->offs_boot = tk->offs_boot; 2207 2208 write_seqcount_end(&vdata->seq); 2209 } 2210 2211 static s64 get_kvmclock_base_ns(void) 2212 { 2213 /* Count up from boot time, but with the frequency of the raw clock. */ 2214 return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot)); 2215 } 2216 #else 2217 static s64 get_kvmclock_base_ns(void) 2218 { 2219 /* Master clock not used, so we can just use CLOCK_BOOTTIME. */ 2220 return ktime_get_boottime_ns(); 2221 } 2222 #endif 2223 2224 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs) 2225 { 2226 int version; 2227 int r; 2228 struct pvclock_wall_clock wc; 2229 u32 wc_sec_hi; 2230 u64 wall_nsec; 2231 2232 if (!wall_clock) 2233 return; 2234 2235 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); 2236 if (r) 2237 return; 2238 2239 if (version & 1) 2240 ++version; /* first time write, random junk */ 2241 2242 ++version; 2243 2244 if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version))) 2245 return; 2246 2247 /* 2248 * The guest calculates current wall clock time by adding 2249 * system time (updated by kvm_guest_time_update below) to the 2250 * wall clock specified here. We do the reverse here. 
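 * Concretely, the guest computes
 *   wall time = wc.sec/wc.nsec (written below) + kvmclock
 * so KVM stores the difference between the host's real time and the current
 * kvmclock value, i.e. wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm).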
2251 */ 2252 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); 2253 2254 wc.nsec = do_div(wall_nsec, 1000000000); 2255 wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */ 2256 wc.version = version; 2257 2258 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); 2259 2260 if (sec_hi_ofs) { 2261 wc_sec_hi = wall_nsec >> 32; 2262 kvm_write_guest(kvm, wall_clock + sec_hi_ofs, 2263 &wc_sec_hi, sizeof(wc_sec_hi)); 2264 } 2265 2266 version++; 2267 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); 2268 } 2269 2270 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, 2271 bool old_msr, bool host_initiated) 2272 { 2273 struct kvm_arch *ka = &vcpu->kvm->arch; 2274 2275 if (vcpu->vcpu_id == 0 && !host_initiated) { 2276 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) 2277 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2278 2279 ka->boot_vcpu_runs_old_kvmclock = old_msr; 2280 } 2281 2282 vcpu->arch.time = system_time; 2283 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 2284 2285 /* we verify if the enable bit is set... */ 2286 if (system_time & 1) { 2287 kvm_gfn_to_pfn_cache_init(vcpu->kvm, &vcpu->arch.pv_time, vcpu, 2288 KVM_HOST_USES_PFN, system_time & ~1ULL, 2289 sizeof(struct pvclock_vcpu_time_info)); 2290 } else { 2291 kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time); 2292 } 2293 2294 return; 2295 } 2296 2297 static uint32_t div_frac(uint32_t dividend, uint32_t divisor) 2298 { 2299 do_shl32_div32(dividend, divisor); 2300 return dividend; 2301 } 2302 2303 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz, 2304 s8 *pshift, u32 *pmultiplier) 2305 { 2306 uint64_t scaled64; 2307 int32_t shift = 0; 2308 uint64_t tps64; 2309 uint32_t tps32; 2310 2311 tps64 = base_hz; 2312 scaled64 = scaled_hz; 2313 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { 2314 tps64 >>= 1; 2315 shift--; 2316 } 2317 2318 tps32 = (uint32_t)tps64; 2319 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { 2320 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) 2321 scaled64 >>= 1; 2322 else 2323 tps32 <<= 1; 2324 shift++; 2325 } 2326 2327 *pshift = shift; 2328 *pmultiplier = div_frac(scaled64, tps32); 2329 } 2330 2331 #ifdef CONFIG_X86_64 2332 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); 2333 #endif 2334 2335 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); 2336 static unsigned long max_tsc_khz; 2337 2338 static u32 adjust_tsc_khz(u32 khz, s32 ppm) 2339 { 2340 u64 v = (u64)khz * (1000000 + ppm); 2341 do_div(v, 1000000); 2342 return v; 2343 } 2344 2345 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier); 2346 2347 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) 2348 { 2349 u64 ratio; 2350 2351 /* Guest TSC same frequency as host TSC? */ 2352 if (!scale) { 2353 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); 2354 return 0; 2355 } 2356 2357 /* TSC scaling supported? 
*/ 2358 if (!kvm_caps.has_tsc_control) { 2359 if (user_tsc_khz > tsc_khz) { 2360 vcpu->arch.tsc_catchup = 1; 2361 vcpu->arch.tsc_always_catchup = 1; 2362 return 0; 2363 } else { 2364 pr_warn_ratelimited("user requested TSC rate below hardware speed\n"); 2365 return -1; 2366 } 2367 } 2368 2369 /* TSC scaling required - calculate ratio */ 2370 ratio = mul_u64_u32_div(1ULL << kvm_caps.tsc_scaling_ratio_frac_bits, 2371 user_tsc_khz, tsc_khz); 2372 2373 if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) { 2374 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", 2375 user_tsc_khz); 2376 return -1; 2377 } 2378 2379 kvm_vcpu_write_tsc_multiplier(vcpu, ratio); 2380 return 0; 2381 } 2382 2383 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) 2384 { 2385 u32 thresh_lo, thresh_hi; 2386 int use_scaling = 0; 2387 2388 /* tsc_khz can be zero if TSC calibration fails */ 2389 if (user_tsc_khz == 0) { 2390 /* set tsc_scaling_ratio to a safe value */ 2391 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); 2392 return -1; 2393 } 2394 2395 /* Compute a scale to convert nanoseconds in TSC cycles */ 2396 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC, 2397 &vcpu->arch.virtual_tsc_shift, 2398 &vcpu->arch.virtual_tsc_mult); 2399 vcpu->arch.virtual_tsc_khz = user_tsc_khz; 2400 2401 /* 2402 * Compute the variation in TSC rate which is acceptable 2403 * within the range of tolerance and decide if the 2404 * rate being applied is within that bounds of the hardware 2405 * rate. If so, no scaling or compensation need be done. 2406 */ 2407 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); 2408 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); 2409 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) { 2410 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi); 2411 use_scaling = 1; 2412 } 2413 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); 2414 } 2415 2416 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) 2417 { 2418 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, 2419 vcpu->arch.virtual_tsc_mult, 2420 vcpu->arch.virtual_tsc_shift); 2421 tsc += vcpu->arch.this_tsc_write; 2422 return tsc; 2423 } 2424 2425 #ifdef CONFIG_X86_64 2426 static inline int gtod_is_based_on_tsc(int mode) 2427 { 2428 return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK; 2429 } 2430 #endif 2431 2432 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) 2433 { 2434 #ifdef CONFIG_X86_64 2435 bool vcpus_matched; 2436 struct kvm_arch *ka = &vcpu->kvm->arch; 2437 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2438 2439 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 2440 atomic_read(&vcpu->kvm->online_vcpus)); 2441 2442 /* 2443 * Once the masterclock is enabled, always perform request in 2444 * order to update it. 2445 * 2446 * In order to enable masterclock, the host clocksource must be TSC 2447 * and the vcpus need to have matched TSCs. When that happens, 2448 * perform request to enable masterclock. 2449 */ 2450 if (ka->use_master_clock || 2451 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) 2452 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2453 2454 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, 2455 atomic_read(&vcpu->kvm->online_vcpus), 2456 ka->use_master_clock, gtod->clock.vclock_mode); 2457 #endif 2458 } 2459 2460 /* 2461 * Multiply tsc by a fixed point number represented by ratio. 
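 * For example, a guest whose TSC is supposed to run at half the host
 * frequency uses a ratio of one half in fixed point, so __scale_tsc()
 * halves every host TSC value; the exact encoding of the ratio is spelled
 * out below.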
2462 * 2463 * The most significant 64-N bits (mult) of ratio represent the 2464 * integral part of the fixed point number; the remaining N bits 2465 * (frac) represent the fractional part, ie. ratio represents a fixed 2466 * point number (mult + frac * 2^(-N)). 2467 * 2468 * N equals to kvm_caps.tsc_scaling_ratio_frac_bits. 2469 */ 2470 static inline u64 __scale_tsc(u64 ratio, u64 tsc) 2471 { 2472 return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits); 2473 } 2474 2475 u64 kvm_scale_tsc(u64 tsc, u64 ratio) 2476 { 2477 u64 _tsc = tsc; 2478 2479 if (ratio != kvm_caps.default_tsc_scaling_ratio) 2480 _tsc = __scale_tsc(ratio, tsc); 2481 2482 return _tsc; 2483 } 2484 EXPORT_SYMBOL_GPL(kvm_scale_tsc); 2485 2486 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) 2487 { 2488 u64 tsc; 2489 2490 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); 2491 2492 return target_tsc - tsc; 2493 } 2494 2495 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) 2496 { 2497 return vcpu->arch.l1_tsc_offset + 2498 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio); 2499 } 2500 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 2501 2502 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier) 2503 { 2504 u64 nested_offset; 2505 2506 if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio) 2507 nested_offset = l1_offset; 2508 else 2509 nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier, 2510 kvm_caps.tsc_scaling_ratio_frac_bits); 2511 2512 nested_offset += l2_offset; 2513 return nested_offset; 2514 } 2515 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset); 2516 2517 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier) 2518 { 2519 if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio) 2520 return mul_u64_u64_shr(l1_multiplier, l2_multiplier, 2521 kvm_caps.tsc_scaling_ratio_frac_bits); 2522 2523 return l1_multiplier; 2524 } 2525 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier); 2526 2527 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset) 2528 { 2529 trace_kvm_write_tsc_offset(vcpu->vcpu_id, 2530 vcpu->arch.l1_tsc_offset, 2531 l1_offset); 2532 2533 vcpu->arch.l1_tsc_offset = l1_offset; 2534 2535 /* 2536 * If we are here because L1 chose not to trap WRMSR to TSC then 2537 * according to the spec this should set L1's TSC (as opposed to 2538 * setting L1's offset for L2). 
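 * When L2 is active, the effective vcpu->arch.tsc_offset is recomputed
 * below via kvm_calc_nested_tsc_offset(), i.e. as
 *   (l1_offset * l2_multiplier >> N) + l2_offset
 * when L2 uses a non-default multiplier, with N being
 * kvm_caps.tsc_scaling_ratio_frac_bits.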
2539 */ 2540 if (is_guest_mode(vcpu)) 2541 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( 2542 l1_offset, 2543 static_call(kvm_x86_get_l2_tsc_offset)(vcpu), 2544 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2545 else 2546 vcpu->arch.tsc_offset = l1_offset; 2547 2548 static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset); 2549 } 2550 2551 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier) 2552 { 2553 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; 2554 2555 /* Userspace is changing the multiplier while L2 is active */ 2556 if (is_guest_mode(vcpu)) 2557 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( 2558 l1_multiplier, 2559 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2560 else 2561 vcpu->arch.tsc_scaling_ratio = l1_multiplier; 2562 2563 if (kvm_caps.has_tsc_control) 2564 static_call(kvm_x86_write_tsc_multiplier)( 2565 vcpu, vcpu->arch.tsc_scaling_ratio); 2566 } 2567 2568 static inline bool kvm_check_tsc_unstable(void) 2569 { 2570 #ifdef CONFIG_X86_64 2571 /* 2572 * TSC is marked unstable when we're running on Hyper-V, but the 2573 * Hyper-V 'TSC page' clocksource is still good. 2574 */ 2575 if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK) 2576 return false; 2577 #endif 2578 return check_tsc_unstable(); 2579 } 2580 2581 /* 2582 * Infers attempts to synchronize the guest's tsc from host writes. Sets the 2583 * offset for the vcpu and tracks the TSC matching generation that the vcpu 2584 * participates in. 2585 */ 2586 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc, 2587 u64 ns, bool matched) 2588 { 2589 struct kvm *kvm = vcpu->kvm; 2590 2591 lockdep_assert_held(&kvm->arch.tsc_write_lock); 2592 2593 /* 2594 * We also track the most recent recorded kHz, write and time to 2595 * allow the matching interval to be extended at each write. 2596 */ 2597 kvm->arch.last_tsc_nsec = ns; 2598 kvm->arch.last_tsc_write = tsc; 2599 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; 2600 kvm->arch.last_tsc_offset = offset; 2601 2602 vcpu->arch.last_guest_tsc = tsc; 2603 2604 kvm_vcpu_write_tsc_offset(vcpu, offset); 2605 2606 if (!matched) { 2607 /* 2608 * We split periods of matched TSC writes into generations. 2609 * For each generation, we track the original measured 2610 * nanosecond time, offset, and write, so if TSCs are in 2611 * sync, we can match exact offset, and if not, we can match 2612 * exact software computation in compute_guest_tsc() 2613 * 2614 * These values are tracked in kvm->arch.cur_xxx variables.
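 * nr_vcpus_matched_tsc counts the other vCPUs that have synchronized to
 * the current generation; pvclock_update_vm_gtod_copy() later compares it
 * against the number of online vCPUs to decide whether the masterclock
 * can be used.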
2615 */ 2616 kvm->arch.cur_tsc_generation++; 2617 kvm->arch.cur_tsc_nsec = ns; 2618 kvm->arch.cur_tsc_write = tsc; 2619 kvm->arch.cur_tsc_offset = offset; 2620 kvm->arch.nr_vcpus_matched_tsc = 0; 2621 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { 2622 kvm->arch.nr_vcpus_matched_tsc++; 2623 } 2624 2625 /* Keep track of which generation this VCPU has synchronized to */ 2626 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; 2627 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; 2628 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; 2629 2630 kvm_track_tsc_matching(vcpu); 2631 } 2632 2633 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) 2634 { 2635 struct kvm *kvm = vcpu->kvm; 2636 u64 offset, ns, elapsed; 2637 unsigned long flags; 2638 bool matched = false; 2639 bool synchronizing = false; 2640 2641 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 2642 offset = kvm_compute_l1_tsc_offset(vcpu, data); 2643 ns = get_kvmclock_base_ns(); 2644 elapsed = ns - kvm->arch.last_tsc_nsec; 2645 2646 if (vcpu->arch.virtual_tsc_khz) { 2647 if (data == 0) { 2648 /* 2649 * detection of vcpu initialization -- need to sync 2650 * with other vCPUs. This particularly helps to keep 2651 * kvm_clock stable after CPU hotplug 2652 */ 2653 synchronizing = true; 2654 } else { 2655 u64 tsc_exp = kvm->arch.last_tsc_write + 2656 nsec_to_cycles(vcpu, elapsed); 2657 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; 2658 /* 2659 * Special case: TSC write with a small delta (1 second) 2660 * of virtual cycle time against real time is 2661 * interpreted as an attempt to synchronize the CPU. 2662 */ 2663 synchronizing = data < tsc_exp + tsc_hz && 2664 data + tsc_hz > tsc_exp; 2665 } 2666 } 2667 2668 /* 2669 * For a reliable TSC, we can match TSC offsets, and for an unstable 2670 * TSC, we add elapsed time in this computation. We could let the 2671 * compensation code attempt to catch up if we fall behind, but 2672 * it's better to try to match offsets from the beginning. 
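 * For example, if the host TSC is unstable and 5 ms of host time have
 * elapsed since the last write, the written value is bumped below by
 * nsec_to_cycles(vcpu, 5000000) before the offset is recomputed, so the
 * guest TSC still appears to advance across the write.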
2673 */ 2674 if (synchronizing && 2675 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { 2676 if (!kvm_check_tsc_unstable()) { 2677 offset = kvm->arch.cur_tsc_offset; 2678 } else { 2679 u64 delta = nsec_to_cycles(vcpu, elapsed); 2680 data += delta; 2681 offset = kvm_compute_l1_tsc_offset(vcpu, data); 2682 } 2683 matched = true; 2684 } 2685 2686 __kvm_synchronize_tsc(vcpu, offset, data, ns, matched); 2687 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 2688 } 2689 2690 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, 2691 s64 adjustment) 2692 { 2693 u64 tsc_offset = vcpu->arch.l1_tsc_offset; 2694 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); 2695 } 2696 2697 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) 2698 { 2699 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio) 2700 WARN_ON(adjustment < 0); 2701 adjustment = kvm_scale_tsc((u64) adjustment, 2702 vcpu->arch.l1_tsc_scaling_ratio); 2703 adjust_tsc_offset_guest(vcpu, adjustment); 2704 } 2705 2706 #ifdef CONFIG_X86_64 2707 2708 static u64 read_tsc(void) 2709 { 2710 u64 ret = (u64)rdtsc_ordered(); 2711 u64 last = pvclock_gtod_data.clock.cycle_last; 2712 2713 if (likely(ret >= last)) 2714 return ret; 2715 2716 /* 2717 * GCC likes to generate cmov here, but this branch is extremely 2718 * predictable (it's just a function of time and the likely is 2719 * very likely) and there's a data dependence, so force GCC 2720 * to generate a branch instead. I don't barrier() because 2721 * we don't actually need a barrier, and if this function 2722 * ever gets inlined it will generate worse code. 2723 */ 2724 asm volatile (""); 2725 return last; 2726 } 2727 2728 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp, 2729 int *mode) 2730 { 2731 long v; 2732 u64 tsc_pg_val; 2733 2734 switch (clock->vclock_mode) { 2735 case VDSO_CLOCKMODE_HVCLOCK: 2736 tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(), 2737 tsc_timestamp); 2738 if (tsc_pg_val != U64_MAX) { 2739 /* TSC page valid */ 2740 *mode = VDSO_CLOCKMODE_HVCLOCK; 2741 v = (tsc_pg_val - clock->cycle_last) & 2742 clock->mask; 2743 } else { 2744 /* TSC page invalid */ 2745 *mode = VDSO_CLOCKMODE_NONE; 2746 } 2747 break; 2748 case VDSO_CLOCKMODE_TSC: 2749 *mode = VDSO_CLOCKMODE_TSC; 2750 *tsc_timestamp = read_tsc(); 2751 v = (*tsc_timestamp - clock->cycle_last) & 2752 clock->mask; 2753 break; 2754 default: 2755 *mode = VDSO_CLOCKMODE_NONE; 2756 } 2757 2758 if (*mode == VDSO_CLOCKMODE_NONE) 2759 *tsc_timestamp = v = 0; 2760 2761 return v * clock->mult; 2762 } 2763 2764 static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp) 2765 { 2766 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2767 unsigned long seq; 2768 int mode; 2769 u64 ns; 2770 2771 do { 2772 seq = read_seqcount_begin(&gtod->seq); 2773 ns = gtod->raw_clock.base_cycles; 2774 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode); 2775 ns >>= gtod->raw_clock.shift; 2776 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot)); 2777 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); 2778 *t = ns; 2779 2780 return mode; 2781 } 2782 2783 static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp) 2784 { 2785 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2786 unsigned long seq; 2787 int mode; 2788 u64 ns; 2789 2790 do { 2791 seq = read_seqcount_begin(&gtod->seq); 2792 ts->tv_sec = gtod->wall_time_sec; 2793 ns = gtod->clock.base_cycles; 2794 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode); 2795 ns >>= gtod->clock.shift; 2796 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); 2797 2798 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); 2799 ts->tv_nsec = ns; 2800 2801 return mode; 2802 } 2803 2804 /* returns true if host is using TSC based clocksource */ 2805 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp) 2806 { 2807 /* checked again under seqlock below */ 2808 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) 2809 return false; 2810 2811 return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns, 2812 tsc_timestamp)); 2813 } 2814 2815 /* returns true if host is using TSC based clocksource */ 2816 static bool kvm_get_walltime_and_clockread(struct timespec64 *ts, 2817 u64 *tsc_timestamp) 2818 { 2819 /* checked again under seqlock below */ 2820 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) 2821 return false; 2822 2823 return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp)); 2824 } 2825 #endif 2826
2827 /* 2828 * 2829 * Assuming a stable TSC across physical CPUs, and a stable TSC 2830 * across virtual CPUs, the following condition is possible. 2831 * Each numbered line represents an event visible to both 2832 * CPUs at the next numbered event. 2833 * 2834 * "timespecX" represents host monotonic time. "tscX" represents 2835 * RDTSC value. 2836 * 2837 * VCPU0 on CPU0 | VCPU1 on CPU1 2838 * 2839 * 1. read timespec0,tsc0 2840 * 2. | timespec1 = timespec0 + N 2841 * | tsc1 = tsc0 + M 2842 * 3. transition to guest | transition to guest 2843 * 4. ret0 = timespec0 + (rdtsc - tsc0) | 2844 * 5. | ret1 = timespec1 + (rdtsc - tsc1) 2845 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M)) 2846 * 2847 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity: 2848 * 2849 * - ret0 < ret1 2850 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M)) 2851 * ... 2852 * - 0 < N - M => M < N 2853 * 2854 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not 2855 * always the case (the difference between two distinct xtime instances 2856 * might be smaller than the difference between corresponding TSC reads, 2857 * when updating guest vcpus' pvclock areas). 2858 * 2859 * To avoid that problem, do not allow visibility of distinct 2860 * system_timestamp/tsc_timestamp values simultaneously: use a master 2861 * copy of host monotonic time values. Update that master copy 2862 * in lockstep. 2863 * 2864 * Rely on synchronization of host TSCs and guest TSCs for monotonicity. 2865 * 2866 */ 2867 2868 static void pvclock_update_vm_gtod_copy(struct kvm *kvm) 2869 { 2870 #ifdef CONFIG_X86_64 2871 struct kvm_arch *ka = &kvm->arch; 2872 int vclock_mode; 2873 bool host_tsc_clocksource, vcpus_matched; 2874 2875 lockdep_assert_held(&kvm->arch.tsc_write_lock); 2876 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 2877 atomic_read(&kvm->online_vcpus)); 2878 2879 /* 2880 * If the host uses TSC clock, then passthrough TSC as stable 2881 * to the guest.
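 * Using the masterclock additionally requires that all vCPUs have matched
 * TSC writes, that no backwards TSC has been observed, and that the boot
 * vCPU is not using the legacy MSR_KVM_SYSTEM_TIME MSR; see the
 * use_master_clock computation below.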
2882 */ 2883 host_tsc_clocksource = kvm_get_time_and_clockread( 2884 &ka->master_kernel_ns, 2885 &ka->master_cycle_now); 2886 2887 ka->use_master_clock = host_tsc_clocksource && vcpus_matched 2888 && !ka->backwards_tsc_observed 2889 && !ka->boot_vcpu_runs_old_kvmclock; 2890 2891 if (ka->use_master_clock) 2892 atomic_set(&kvm_guest_has_master_clock, 1); 2893 2894 vclock_mode = pvclock_gtod_data.clock.vclock_mode; 2895 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, 2896 vcpus_matched); 2897 #endif 2898 } 2899 2900 static void kvm_make_mclock_inprogress_request(struct kvm *kvm) 2901 { 2902 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); 2903 } 2904 2905 static void __kvm_start_pvclock_update(struct kvm *kvm) 2906 { 2907 raw_spin_lock_irq(&kvm->arch.tsc_write_lock); 2908 write_seqcount_begin(&kvm->arch.pvclock_sc); 2909 } 2910 2911 static void kvm_start_pvclock_update(struct kvm *kvm) 2912 { 2913 kvm_make_mclock_inprogress_request(kvm); 2914 2915 /* no guest entries from this point */ 2916 __kvm_start_pvclock_update(kvm); 2917 } 2918 2919 static void kvm_end_pvclock_update(struct kvm *kvm) 2920 { 2921 struct kvm_arch *ka = &kvm->arch; 2922 struct kvm_vcpu *vcpu; 2923 unsigned long i; 2924 2925 write_seqcount_end(&ka->pvclock_sc); 2926 raw_spin_unlock_irq(&ka->tsc_write_lock); 2927 kvm_for_each_vcpu(i, vcpu, kvm) 2928 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2929 2930 /* guest entries allowed */ 2931 kvm_for_each_vcpu(i, vcpu, kvm) 2932 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); 2933 } 2934 2935 static void kvm_update_masterclock(struct kvm *kvm) 2936 { 2937 kvm_hv_request_tsc_page_update(kvm); 2938 kvm_start_pvclock_update(kvm); 2939 pvclock_update_vm_gtod_copy(kvm); 2940 kvm_end_pvclock_update(kvm); 2941 } 2942 2943 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. 
*/ 2944 static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 2945 { 2946 struct kvm_arch *ka = &kvm->arch; 2947 struct pvclock_vcpu_time_info hv_clock; 2948 2949 /* both __this_cpu_read() and rdtsc() should be on the same cpu */ 2950 get_cpu(); 2951 2952 data->flags = 0; 2953 if (ka->use_master_clock && __this_cpu_read(cpu_tsc_khz)) { 2954 #ifdef CONFIG_X86_64 2955 struct timespec64 ts; 2956 2957 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) { 2958 data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec; 2959 data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC; 2960 } else 2961 #endif 2962 data->host_tsc = rdtsc(); 2963 2964 data->flags |= KVM_CLOCK_TSC_STABLE; 2965 hv_clock.tsc_timestamp = ka->master_cycle_now; 2966 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 2967 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, 2968 &hv_clock.tsc_shift, 2969 &hv_clock.tsc_to_system_mul); 2970 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc); 2971 } else { 2972 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; 2973 } 2974 2975 put_cpu(); 2976 } 2977 2978 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 2979 { 2980 struct kvm_arch *ka = &kvm->arch; 2981 unsigned seq; 2982 2983 do { 2984 seq = read_seqcount_begin(&ka->pvclock_sc); 2985 __get_kvmclock(kvm, data); 2986 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 2987 } 2988 2989 u64 get_kvmclock_ns(struct kvm *kvm) 2990 { 2991 struct kvm_clock_data data; 2992 2993 get_kvmclock(kvm, &data); 2994 return data.clock; 2995 } 2996 2997 static void kvm_setup_guest_pvclock(struct kvm_vcpu *v, 2998 struct gfn_to_pfn_cache *gpc, 2999 unsigned int offset) 3000 { 3001 struct kvm_vcpu_arch *vcpu = &v->arch; 3002 struct pvclock_vcpu_time_info *guest_hv_clock; 3003 unsigned long flags; 3004 3005 read_lock_irqsave(&gpc->lock, flags); 3006 while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa, 3007 offset + sizeof(*guest_hv_clock))) { 3008 read_unlock_irqrestore(&gpc->lock, flags); 3009 3010 if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, 3011 offset + sizeof(*guest_hv_clock))) 3012 return; 3013 3014 read_lock_irqsave(&gpc->lock, flags); 3015 } 3016 3017 guest_hv_clock = (void *)(gpc->khva + offset); 3018 3019 /* 3020 * This VCPU is paused, but it's legal for a guest to read another 3021 * VCPU's kvmclock, so we really have to follow the specification where 3022 * it says that version is odd if data is being modified, and even after 3023 * it is consistent. 
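 * The update below therefore follows the usual seqcount-style protocol:
 * bump the version to an odd value, smp_wmb(), copy in the new data,
 * smp_wmb(), then bump the version again to an even value.  A guest
 * reader must retry if it observes an odd version, or if the version
 * changes between its two reads.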
3024 */ 3025 3026 guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1; 3027 smp_wmb(); 3028 3029 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 3030 vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); 3031 3032 if (vcpu->pvclock_set_guest_stopped_request) { 3033 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; 3034 vcpu->pvclock_set_guest_stopped_request = false; 3035 } 3036 3037 memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock)); 3038 smp_wmb(); 3039 3040 guest_hv_clock->version = ++vcpu->hv_clock.version; 3041 3042 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); 3043 read_unlock_irqrestore(&gpc->lock, flags); 3044 3045 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); 3046 } 3047 3048 static int kvm_guest_time_update(struct kvm_vcpu *v) 3049 { 3050 unsigned long flags, tgt_tsc_khz; 3051 unsigned seq; 3052 struct kvm_vcpu_arch *vcpu = &v->arch; 3053 struct kvm_arch *ka = &v->kvm->arch; 3054 s64 kernel_ns; 3055 u64 tsc_timestamp, host_tsc; 3056 u8 pvclock_flags; 3057 bool use_master_clock; 3058 3059 kernel_ns = 0; 3060 host_tsc = 0; 3061 3062 /* 3063 * If the host uses TSC clock, then passthrough TSC as stable 3064 * to the guest. 3065 */ 3066 do { 3067 seq = read_seqcount_begin(&ka->pvclock_sc); 3068 use_master_clock = ka->use_master_clock; 3069 if (use_master_clock) { 3070 host_tsc = ka->master_cycle_now; 3071 kernel_ns = ka->master_kernel_ns; 3072 } 3073 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 3074 3075 /* Keep irq disabled to prevent changes to the clock */ 3076 local_irq_save(flags); 3077 tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz); 3078 if (unlikely(tgt_tsc_khz == 0)) { 3079 local_irq_restore(flags); 3080 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3081 return 1; 3082 } 3083 if (!use_master_clock) { 3084 host_tsc = rdtsc(); 3085 kernel_ns = get_kvmclock_base_ns(); 3086 } 3087 3088 tsc_timestamp = kvm_read_l1_tsc(v, host_tsc); 3089 3090 /* 3091 * We may have to catch up the TSC to match elapsed wall clock 3092 * time for two reasons, even if kvmclock is used. 3093 * 1) CPU could have been running below the maximum TSC rate 3094 * 2) Broken TSC compensation resets the base at each VCPU 3095 * entry to avoid unknown leaps of TSC even when running 3096 * again on the same CPU. This may cause apparent elapsed 3097 * time to disappear, and the guest to stand still or run 3098 * very slowly. 
3099 */ 3100 if (vcpu->tsc_catchup) { 3101 u64 tsc = compute_guest_tsc(v, kernel_ns); 3102 if (tsc > tsc_timestamp) { 3103 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); 3104 tsc_timestamp = tsc; 3105 } 3106 } 3107 3108 local_irq_restore(flags); 3109 3110 /* With all the info we got, fill in the values */ 3111 3112 if (kvm_caps.has_tsc_control) 3113 tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz, 3114 v->arch.l1_tsc_scaling_ratio); 3115 3116 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { 3117 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, 3118 &vcpu->hv_clock.tsc_shift, 3119 &vcpu->hv_clock.tsc_to_system_mul); 3120 vcpu->hw_tsc_khz = tgt_tsc_khz; 3121 } 3122 3123 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; 3124 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; 3125 vcpu->last_guest_tsc = tsc_timestamp; 3126 3127 /* If the host uses TSC clocksource, then it is stable */ 3128 pvclock_flags = 0; 3129 if (use_master_clock) 3130 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; 3131 3132 vcpu->hv_clock.flags = pvclock_flags; 3133 3134 if (vcpu->pv_time.active) 3135 kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0); 3136 if (vcpu->xen.vcpu_info_cache.active) 3137 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache, 3138 offsetof(struct compat_vcpu_info, time)); 3139 if (vcpu->xen.vcpu_time_info_cache.active) 3140 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0); 3141 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); 3142 return 0; 3143 } 3144 3145 /* 3146 * kvmclock updates which are isolated to a given vcpu, such as 3147 * vcpu->cpu migration, should not allow system_timestamp from 3148 * the rest of the vcpus to remain static. Otherwise ntp frequency 3149 * correction applies to one vcpu's system_timestamp but not 3150 * the others. 3151 * 3152 * So in those cases, request a kvmclock update for all vcpus. 3153 * We need to rate-limit these requests though, as they can 3154 * considerably slow guests that have a large number of vcpus. 3155 * The time for a remote vcpu to update its kvmclock is bound 3156 * by the delay we use to rate-limit the updates. 3157 */ 3158 3159 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) 3160 3161 static void kvmclock_update_fn(struct work_struct *work) 3162 { 3163 unsigned long i; 3164 struct delayed_work *dwork = to_delayed_work(work); 3165 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3166 kvmclock_update_work); 3167 struct kvm *kvm = container_of(ka, struct kvm, arch); 3168 struct kvm_vcpu *vcpu; 3169 3170 kvm_for_each_vcpu(i, vcpu, kvm) { 3171 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3172 kvm_vcpu_kick(vcpu); 3173 } 3174 } 3175 3176 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) 3177 { 3178 struct kvm *kvm = v->kvm; 3179 3180 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3181 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 3182 KVMCLOCK_UPDATE_DELAY); 3183 } 3184 3185 #define KVMCLOCK_SYNC_PERIOD (300 * HZ) 3186 3187 static void kvmclock_sync_fn(struct work_struct *work) 3188 { 3189 struct delayed_work *dwork = to_delayed_work(work); 3190 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3191 kvmclock_sync_work); 3192 struct kvm *kvm = container_of(ka, struct kvm, arch); 3193 3194 if (!kvmclock_periodic_sync) 3195 return; 3196 3197 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); 3198 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 3199 KVMCLOCK_SYNC_PERIOD); 3200 } 3201 3202 /* These helpers are safe iff @msr is known to be an MCx bank MSR. 
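 * (The bank registers are laid out CTL, STATUS, ADDR, MISC in groups of
 * four starting at MSR_IA32_MC0_CTL (0x400), so the low two bits of the
 * MSR index identify the register within its bank.)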
*/ 3203 static bool is_mci_control_msr(u32 msr) 3204 { 3205 return (msr & 3) == 0; 3206 } 3207 static bool is_mci_status_msr(u32 msr) 3208 { 3209 return (msr & 3) == 1; 3210 } 3211 3212 /* 3213 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP. 3214 */ 3215 static bool can_set_mci_status(struct kvm_vcpu *vcpu) 3216 { 3217 /* McStatusWrEn enabled? */ 3218 if (guest_cpuid_is_amd_or_hygon(vcpu)) 3219 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); 3220 3221 return false; 3222 } 3223 3224 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3225 { 3226 u64 mcg_cap = vcpu->arch.mcg_cap; 3227 unsigned bank_num = mcg_cap & 0xff; 3228 u32 msr = msr_info->index; 3229 u64 data = msr_info->data; 3230 u32 offset, last_msr; 3231 3232 switch (msr) { 3233 case MSR_IA32_MCG_STATUS: 3234 vcpu->arch.mcg_status = data; 3235 break; 3236 case MSR_IA32_MCG_CTL: 3237 if (!(mcg_cap & MCG_CTL_P) && 3238 (data || !msr_info->host_initiated)) 3239 return 1; 3240 if (data != 0 && data != ~(u64)0) 3241 return 1; 3242 vcpu->arch.mcg_ctl = data; 3243 break; 3244 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3245 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; 3246 if (msr > last_msr) 3247 return 1; 3248 3249 if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated)) 3250 return 1; 3251 /* An attempt to write a 1 to a reserved bit raises #GP */ 3252 if (data & ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK)) 3253 return 1; 3254 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, 3255 last_msr + 1 - MSR_IA32_MC0_CTL2); 3256 vcpu->arch.mci_ctl2_banks[offset] = data; 3257 break; 3258 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3259 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; 3260 if (msr > last_msr) 3261 return 1; 3262 3263 /* 3264 * Only 0 or all 1s can be written to IA32_MCi_CTL, all other 3265 * values are architecturally undefined. But, some Linux 3266 * kernels clear bit 10 in bank 4 to workaround a BIOS/GART TLB 3267 * issue on AMD K8s, allow bit 10 to be clear when setting all 3268 * other bits in order to avoid an uncaught #GP in the guest. 3269 * 3270 * UNIXWARE clears bit 0 of MC1_CTL to ignore correctable, 3271 * single-bit ECC data errors. 3272 */ 3273 if (is_mci_control_msr(msr) && 3274 data != 0 && (data | (1 << 10) | 1) != ~(u64)0) 3275 return 1; 3276 3277 /* 3278 * All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR. 3279 * AMD-based CPUs allow non-zero values, but if and only if 3280 * HWCR[McStatusWrEn] is set. 
3281 */ 3282 if (!msr_info->host_initiated && is_mci_status_msr(msr) && 3283 data != 0 && !can_set_mci_status(vcpu)) 3284 return 1; 3285 3286 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, 3287 last_msr + 1 - MSR_IA32_MC0_CTL); 3288 vcpu->arch.mce_banks[offset] = data; 3289 break; 3290 default: 3291 return 1; 3292 } 3293 return 0; 3294 } 3295 3296 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) 3297 { 3298 u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT; 3299 3300 return (vcpu->arch.apf.msr_en_val & mask) == mask; 3301 } 3302 3303 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) 3304 { 3305 gpa_t gpa = data & ~0x3f; 3306 3307 /* Bits 4:5 are reserved, should be zero */ 3308 if (data & 0x30) 3309 return 1; 3310 3311 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) && 3312 (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT)) 3313 return 1; 3314 3315 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) && 3316 (data & KVM_ASYNC_PF_DELIVERY_AS_INT)) 3317 return 1; 3318 3319 if (!lapic_in_kernel(vcpu)) 3320 return data ? 1 : 0; 3321 3322 vcpu->arch.apf.msr_en_val = data; 3323 3324 if (!kvm_pv_async_pf_enabled(vcpu)) { 3325 kvm_clear_async_pf_completion_queue(vcpu); 3326 kvm_async_pf_hash_reset(vcpu); 3327 return 0; 3328 } 3329 3330 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, 3331 sizeof(u64))) 3332 return 1; 3333 3334 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); 3335 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; 3336 3337 kvm_async_pf_wakeup_all(vcpu); 3338 3339 return 0; 3340 } 3341 3342 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data) 3343 { 3344 /* Bits 8-63 are reserved */ 3345 if (data >> 8) 3346 return 1; 3347 3348 if (!lapic_in_kernel(vcpu)) 3349 return 1; 3350 3351 vcpu->arch.apf.msr_int_val = data; 3352 3353 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; 3354 3355 return 0; 3356 } 3357 3358 static void kvmclock_reset(struct kvm_vcpu *vcpu) 3359 { 3360 kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time); 3361 vcpu->arch.time = 0; 3362 } 3363 3364 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) 3365 { 3366 ++vcpu->stat.tlb_flush; 3367 static_call(kvm_x86_flush_tlb_all)(vcpu); 3368 } 3369 3370 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) 3371 { 3372 ++vcpu->stat.tlb_flush; 3373 3374 if (!tdp_enabled) { 3375 /* 3376 * A TLB flush on behalf of the guest is equivalent to 3377 * INVPCID(all), toggling CR4.PGE, etc., which requires 3378 * a forced sync of the shadow page tables. Ensure all the 3379 * roots are synced and the guest TLB in hardware is clean. 3380 */ 3381 kvm_mmu_sync_roots(vcpu); 3382 kvm_mmu_sync_prev_roots(vcpu); 3383 } 3384 3385 static_call(kvm_x86_flush_tlb_guest)(vcpu); 3386 } 3387 3388 3389 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) 3390 { 3391 ++vcpu->stat.tlb_flush; 3392 static_call(kvm_x86_flush_tlb_current)(vcpu); 3393 } 3394 3395 /* 3396 * Service "local" TLB flush requests, which are specific to the current MMU 3397 * context. In addition to the generic event handling in vcpu_enter_guest(), 3398 * TLB flushes that are targeted at an MMU context also need to be serviced 3399 * prior to nested VM-Enter/VM-Exit.
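 * (A pending flush that targets the current context must be performed
 * before the context is switched, otherwise it would be applied to the
 * wrong MMU context after the transition.)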
3400 */ 3401 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu) 3402 { 3403 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 3404 kvm_vcpu_flush_tlb_current(vcpu); 3405 3406 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) 3407 kvm_vcpu_flush_tlb_guest(vcpu); 3408 } 3409 EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests); 3410 3411 static void record_steal_time(struct kvm_vcpu *vcpu) 3412 { 3413 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 3414 struct kvm_steal_time __user *st; 3415 struct kvm_memslots *slots; 3416 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; 3417 u64 steal; 3418 u32 version; 3419 3420 if (kvm_xen_msr_enabled(vcpu->kvm)) { 3421 kvm_xen_runstate_set_running(vcpu); 3422 return; 3423 } 3424 3425 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 3426 return; 3427 3428 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) 3429 return; 3430 3431 slots = kvm_memslots(vcpu->kvm); 3432 3433 if (unlikely(slots->generation != ghc->generation || 3434 gpa != ghc->gpa || 3435 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) { 3436 /* We rely on the fact that it fits in a single page. */ 3437 BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS); 3438 3439 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) || 3440 kvm_is_error_hva(ghc->hva) || !ghc->memslot) 3441 return; 3442 } 3443 3444 st = (struct kvm_steal_time __user *)ghc->hva; 3445 /* 3446 * Doing a TLB flush here, on the guest's behalf, can avoid 3447 * expensive IPIs. 3448 */ 3449 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { 3450 u8 st_preempted = 0; 3451 int err = -EFAULT; 3452 3453 if (!user_access_begin(st, sizeof(*st))) 3454 return; 3455 3456 asm volatile("1: xchgb %0, %2\n" 3457 "xor %1, %1\n" 3458 "2:\n" 3459 _ASM_EXTABLE_UA(1b, 2b) 3460 : "+q" (st_preempted), 3461 "+&r" (err), 3462 "+m" (st->preempted)); 3463 if (err) 3464 goto out; 3465 3466 user_access_end(); 3467 3468 vcpu->arch.st.preempted = 0; 3469 3470 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, 3471 st_preempted & KVM_VCPU_FLUSH_TLB); 3472 if (st_preempted & KVM_VCPU_FLUSH_TLB) 3473 kvm_vcpu_flush_tlb_guest(vcpu); 3474 3475 if (!user_access_begin(st, sizeof(*st))) 3476 goto dirty; 3477 } else { 3478 if (!user_access_begin(st, sizeof(*st))) 3479 return; 3480 3481 unsafe_put_user(0, &st->preempted, out); 3482 vcpu->arch.st.preempted = 0; 3483 } 3484 3485 unsafe_get_user(version, &st->version, out); 3486 if (version & 1) 3487 version += 1; /* first time write, random junk */ 3488 3489 version += 1; 3490 unsafe_put_user(version, &st->version, out); 3491 3492 smp_wmb(); 3493 3494 unsafe_get_user(steal, &st->steal, out); 3495 steal += current->sched_info.run_delay - 3496 vcpu->arch.st.last_steal; 3497 vcpu->arch.st.last_steal = current->sched_info.run_delay; 3498 unsafe_put_user(steal, &st->steal, out); 3499 3500 version += 1; 3501 unsafe_put_user(version, &st->version, out); 3502 3503 out: 3504 user_access_end(); 3505 dirty: 3506 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 3507 } 3508 3509 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3510 { 3511 bool pr = false; 3512 u32 msr = msr_info->index; 3513 u64 data = msr_info->data; 3514 3515 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) 3516 return kvm_xen_write_hypercall_page(vcpu, data); 3517 3518 switch (msr) { 3519 case MSR_AMD64_NB_CFG: 3520 case MSR_IA32_UCODE_WRITE: 3521 case MSR_VM_HSAVE_PA: 3522 case MSR_AMD64_PATCH_LOADER: 3523 case MSR_AMD64_BU_CFG2: 3524 case MSR_AMD64_DC_CFG: 3525 case 
MSR_F15H_EX_CFG: 3526 break; 3527 3528 case MSR_IA32_UCODE_REV: 3529 if (msr_info->host_initiated) 3530 vcpu->arch.microcode_version = data; 3531 break; 3532 case MSR_IA32_ARCH_CAPABILITIES: 3533 if (!msr_info->host_initiated) 3534 return 1; 3535 vcpu->arch.arch_capabilities = data; 3536 break; 3537 case MSR_IA32_PERF_CAPABILITIES: { 3538 struct kvm_msr_entry msr_ent = {.index = msr, .data = 0}; 3539 3540 if (!msr_info->host_initiated) 3541 return 1; 3542 if (kvm_get_msr_feature(&msr_ent)) 3543 return 1; 3544 if (data & ~msr_ent.data) 3545 return 1; 3546 3547 vcpu->arch.perf_capabilities = data; 3548 kvm_pmu_refresh(vcpu); 3549 return 0; 3550 } 3551 case MSR_EFER: 3552 return set_efer(vcpu, msr_info); 3553 case MSR_K7_HWCR: 3554 data &= ~(u64)0x40; /* ignore flush filter disable */ 3555 data &= ~(u64)0x100; /* ignore ignne emulation enable */ 3556 data &= ~(u64)0x8; /* ignore TLB cache disable */ 3557 3558 /* Handle McStatusWrEn */ 3559 if (data == BIT_ULL(18)) { 3560 vcpu->arch.msr_hwcr = data; 3561 } else if (data != 0) { 3562 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", 3563 data); 3564 return 1; 3565 } 3566 break; 3567 case MSR_FAM10H_MMIO_CONF_BASE: 3568 if (data != 0) { 3569 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " 3570 "0x%llx\n", data); 3571 return 1; 3572 } 3573 break; 3574 case 0x200 ... MSR_IA32_MC0_CTL2 - 1: 3575 case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff: 3576 return kvm_mtrr_set_msr(vcpu, msr, data); 3577 case MSR_IA32_APICBASE: 3578 return kvm_set_apic_base(vcpu, msr_info); 3579 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: 3580 return kvm_x2apic_msr_write(vcpu, msr, data); 3581 case MSR_IA32_TSC_DEADLINE: 3582 kvm_set_lapic_tscdeadline_msr(vcpu, data); 3583 break; 3584 case MSR_IA32_TSC_ADJUST: 3585 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) { 3586 if (!msr_info->host_initiated) { 3587 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; 3588 adjust_tsc_offset_guest(vcpu, adj); 3589 /* Before back to guest, tsc_timestamp must be adjusted 3590 * as well, otherwise guest's percpu pvclock time could jump. 3591 */ 3592 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3593 } 3594 vcpu->arch.ia32_tsc_adjust_msr = data; 3595 } 3596 break; 3597 case MSR_IA32_MISC_ENABLE: { 3598 u64 old_val = vcpu->arch.ia32_misc_enable_msr; 3599 3600 if (!msr_info->host_initiated) { 3601 /* RO bits */ 3602 if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK) 3603 return 1; 3604 3605 /* R bits, i.e. writes are ignored, but don't fault. 
*/ 3606 data = data & ~MSR_IA32_MISC_ENABLE_EMON; 3607 data |= old_val & MSR_IA32_MISC_ENABLE_EMON; 3608 } 3609 3610 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && 3611 ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { 3612 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3)) 3613 return 1; 3614 vcpu->arch.ia32_misc_enable_msr = data; 3615 kvm_update_cpuid_runtime(vcpu); 3616 } else { 3617 vcpu->arch.ia32_misc_enable_msr = data; 3618 } 3619 break; 3620 } 3621 case MSR_IA32_SMBASE: 3622 if (!msr_info->host_initiated) 3623 return 1; 3624 vcpu->arch.smbase = data; 3625 break; 3626 case MSR_IA32_POWER_CTL: 3627 vcpu->arch.msr_ia32_power_ctl = data; 3628 break; 3629 case MSR_IA32_TSC: 3630 if (msr_info->host_initiated) { 3631 kvm_synchronize_tsc(vcpu, data); 3632 } else { 3633 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; 3634 adjust_tsc_offset_guest(vcpu, adj); 3635 vcpu->arch.ia32_tsc_adjust_msr += adj; 3636 } 3637 break; 3638 case MSR_IA32_XSS: 3639 if (!msr_info->host_initiated && 3640 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 3641 return 1; 3642 /* 3643 * KVM supports exposing PT to the guest, but does not support 3644 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than 3645 * XSAVES/XRSTORS to save/restore PT MSRs. 3646 */ 3647 if (data & ~kvm_caps.supported_xss) 3648 return 1; 3649 vcpu->arch.ia32_xss = data; 3650 kvm_update_cpuid_runtime(vcpu); 3651 break; 3652 case MSR_SMI_COUNT: 3653 if (!msr_info->host_initiated) 3654 return 1; 3655 vcpu->arch.smi_count = data; 3656 break; 3657 case MSR_KVM_WALL_CLOCK_NEW: 3658 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3659 return 1; 3660 3661 vcpu->kvm->arch.wall_clock = data; 3662 kvm_write_wall_clock(vcpu->kvm, data, 0); 3663 break; 3664 case MSR_KVM_WALL_CLOCK: 3665 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3666 return 1; 3667 3668 vcpu->kvm->arch.wall_clock = data; 3669 kvm_write_wall_clock(vcpu->kvm, data, 0); 3670 break; 3671 case MSR_KVM_SYSTEM_TIME_NEW: 3672 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3673 return 1; 3674 3675 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); 3676 break; 3677 case MSR_KVM_SYSTEM_TIME: 3678 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3679 return 1; 3680 3681 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); 3682 break; 3683 case MSR_KVM_ASYNC_PF_EN: 3684 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 3685 return 1; 3686 3687 if (kvm_pv_enable_async_pf(vcpu, data)) 3688 return 1; 3689 break; 3690 case MSR_KVM_ASYNC_PF_INT: 3691 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3692 return 1; 3693 3694 if (kvm_pv_enable_async_pf_int(vcpu, data)) 3695 return 1; 3696 break; 3697 case MSR_KVM_ASYNC_PF_ACK: 3698 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3699 return 1; 3700 if (data & 0x1) { 3701 vcpu->arch.apf.pageready_pending = false; 3702 kvm_check_async_pf_completion(vcpu); 3703 } 3704 break; 3705 case MSR_KVM_STEAL_TIME: 3706 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 3707 return 1; 3708 3709 if (unlikely(!sched_info_on())) 3710 return 1; 3711 3712 if (data & KVM_STEAL_RESERVED_MASK) 3713 return 1; 3714 3715 vcpu->arch.st.msr_val = data; 3716 3717 if (!(data & KVM_MSR_ENABLED)) 3718 break; 3719 3720 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 3721 3722 break; 3723 case MSR_KVM_PV_EOI_EN: 3724 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 3725 return 1; 3726 3727 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8))) 3728 return 1; 3729 break; 3730 3731 case 
MSR_KVM_POLL_CONTROL: 3732 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 3733 return 1; 3734 3735 /* only enable bit supported */ 3736 if (data & (-1ULL << 1)) 3737 return 1; 3738 3739 vcpu->arch.msr_kvm_poll_control = data; 3740 break; 3741 3742 case MSR_IA32_MCG_CTL: 3743 case MSR_IA32_MCG_STATUS: 3744 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3745 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3746 return set_msr_mce(vcpu, msr_info); 3747 3748 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3749 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3750 pr = true; 3751 fallthrough; 3752 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3753 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3754 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3755 return kvm_pmu_set_msr(vcpu, msr_info); 3756 3757 if (pr || data != 0) 3758 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " 3759 "0x%x data 0x%llx\n", msr, data); 3760 break; 3761 case MSR_K7_CLK_CTL: 3762 /* 3763 * Ignore all writes to this no longer documented MSR. 3764 * Writes are only relevant for old K7 processors, 3765 * all pre-dating SVM, but a recommended workaround from 3766 * AMD for these chips. It is possible to specify the 3767 * affected processor models on the command line, hence 3768 * the need to ignore the workaround. 3769 */ 3770 break; 3771 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 3772 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 3773 case HV_X64_MSR_SYNDBG_OPTIONS: 3774 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 3775 case HV_X64_MSR_CRASH_CTL: 3776 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 3777 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 3778 case HV_X64_MSR_TSC_EMULATION_CONTROL: 3779 case HV_X64_MSR_TSC_EMULATION_STATUS: 3780 return kvm_hv_set_msr_common(vcpu, msr, data, 3781 msr_info->host_initiated); 3782 case MSR_IA32_BBL_CR_CTL3: 3783 /* Drop writes to this legacy MSR -- see rdmsr 3784 * counterpart for further detail. 
		 */
		if (report_ignored_msrs)
			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
				    msr, data);
		break;
	case MSR_AMD64_OSVW_ID_LENGTH:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
			return 1;
		vcpu->arch.osvw.length = data;
		break;
	case MSR_AMD64_OSVW_STATUS:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
			return 1;
		vcpu->arch.osvw.status = data;
		break;
	case MSR_PLATFORM_INFO:
		if (!msr_info->host_initiated ||
		    (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
		     cpuid_fault_enabled(vcpu)))
			return 1;
		vcpu->arch.msr_platform_info = data;
		break;
	case MSR_MISC_FEATURES_ENABLES:
		if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
		    (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
		     !supports_cpuid_fault(vcpu)))
			return 1;
		vcpu->arch.msr_misc_features_enables = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_IA32_XFD:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
			return 1;

		if (data & ~kvm_guest_supported_xfd(vcpu))
			return 1;

		fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
		break;
	case MSR_IA32_XFD_ERR:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
			return 1;

		if (data & ~kvm_guest_supported_xfd(vcpu))
			return 1;

		vcpu->arch.guest_fpu.xfd_err = data;
		break;
#endif
	case MSR_IA32_PEBS_ENABLE:
	case MSR_IA32_DS_AREA:
	case MSR_PEBS_DATA_CFG:
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		if (kvm_pmu_is_valid_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr_info);
		/*
		 * Userspace is allowed to write '0' to MSRs that KVM reports
		 * as to-be-saved, even if an MSR isn't fully supported.
		 */
		return !msr_info->host_initiated || data;
	default:
		if (kvm_pmu_is_valid_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr_info);
		return KVM_MSR_RET_INVALID;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	u64 data;
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
	u32 offset, last_msr;

	switch (msr) {
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
		data = 0;
		break;
	case MSR_IA32_MCG_CAP:
		data = vcpu->arch.mcg_cap;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P) && !host)
			return 1;
		data = vcpu->arch.mcg_ctl;
		break;
	case MSR_IA32_MCG_STATUS:
		data = vcpu->arch.mcg_status;
		break;
	case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
		last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
		if (msr > last_msr)
			return 1;

		if (!(mcg_cap & MCG_CMCI_P) && !host)
			return 1;
		offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
					    last_msr + 1 - MSR_IA32_MC0_CTL2);
		data = vcpu->arch.mci_ctl2_banks[offset];
		break;
	case MSR_IA32_MC0_CTL ...
MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3891 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; 3892 if (msr > last_msr) 3893 return 1; 3894 3895 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, 3896 last_msr + 1 - MSR_IA32_MC0_CTL); 3897 data = vcpu->arch.mce_banks[offset]; 3898 break; 3899 default: 3900 return 1; 3901 } 3902 *pdata = data; 3903 return 0; 3904 } 3905 3906 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3907 { 3908 switch (msr_info->index) { 3909 case MSR_IA32_PLATFORM_ID: 3910 case MSR_IA32_EBL_CR_POWERON: 3911 case MSR_IA32_LASTBRANCHFROMIP: 3912 case MSR_IA32_LASTBRANCHTOIP: 3913 case MSR_IA32_LASTINTFROMIP: 3914 case MSR_IA32_LASTINTTOIP: 3915 case MSR_AMD64_SYSCFG: 3916 case MSR_K8_TSEG_ADDR: 3917 case MSR_K8_TSEG_MASK: 3918 case MSR_VM_HSAVE_PA: 3919 case MSR_K8_INT_PENDING_MSG: 3920 case MSR_AMD64_NB_CFG: 3921 case MSR_FAM10H_MMIO_CONF_BASE: 3922 case MSR_AMD64_BU_CFG2: 3923 case MSR_IA32_PERF_CTL: 3924 case MSR_AMD64_DC_CFG: 3925 case MSR_F15H_EX_CFG: 3926 /* 3927 * Intel Sandy Bridge CPUs must support the RAPL (running average power 3928 * limit) MSRs. Just return 0, as we do not want to expose the host 3929 * data here. Do not conditionalize this on CPUID, as KVM does not do 3930 * so for existing CPU-specific MSRs. 3931 */ 3932 case MSR_RAPL_POWER_UNIT: 3933 case MSR_PP0_ENERGY_STATUS: /* Power plane 0 (core) */ 3934 case MSR_PP1_ENERGY_STATUS: /* Power plane 1 (graphics uncore) */ 3935 case MSR_PKG_ENERGY_STATUS: /* Total package */ 3936 case MSR_DRAM_ENERGY_STATUS: /* DRAM controller */ 3937 msr_info->data = 0; 3938 break; 3939 case MSR_IA32_PEBS_ENABLE: 3940 case MSR_IA32_DS_AREA: 3941 case MSR_PEBS_DATA_CFG: 3942 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: 3943 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3944 return kvm_pmu_get_msr(vcpu, msr_info); 3945 /* 3946 * Userspace is allowed to read MSRs that KVM reports as 3947 * to-be-saved, even if an MSR isn't fully supported. 3948 */ 3949 if (!msr_info->host_initiated) 3950 return 1; 3951 msr_info->data = 0; 3952 break; 3953 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3954 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3955 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3956 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3957 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3958 return kvm_pmu_get_msr(vcpu, msr_info); 3959 msr_info->data = 0; 3960 break; 3961 case MSR_IA32_UCODE_REV: 3962 msr_info->data = vcpu->arch.microcode_version; 3963 break; 3964 case MSR_IA32_ARCH_CAPABILITIES: 3965 if (!msr_info->host_initiated && 3966 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) 3967 return 1; 3968 msr_info->data = vcpu->arch.arch_capabilities; 3969 break; 3970 case MSR_IA32_PERF_CAPABILITIES: 3971 if (!msr_info->host_initiated && 3972 !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) 3973 return 1; 3974 msr_info->data = vcpu->arch.perf_capabilities; 3975 break; 3976 case MSR_IA32_POWER_CTL: 3977 msr_info->data = vcpu->arch.msr_ia32_power_ctl; 3978 break; 3979 case MSR_IA32_TSC: { 3980 /* 3981 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset 3982 * even when not intercepted. AMD manual doesn't explicitly 3983 * state this but appears to behave the same. 3984 * 3985 * On userspace reads and writes, however, we unconditionally 3986 * return L1's TSC value to ensure backwards-compatible 3987 * behavior for migration. 
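		 * That is why the host_initiated path below uses the L1 TSC
		 * offset and scaling ratio, while guest reads use the values
		 * currently in effect (which may belong to a nested L2 guest).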
3988 */ 3989 u64 offset, ratio; 3990 3991 if (msr_info->host_initiated) { 3992 offset = vcpu->arch.l1_tsc_offset; 3993 ratio = vcpu->arch.l1_tsc_scaling_ratio; 3994 } else { 3995 offset = vcpu->arch.tsc_offset; 3996 ratio = vcpu->arch.tsc_scaling_ratio; 3997 } 3998 3999 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset; 4000 break; 4001 } 4002 case MSR_MTRRcap: 4003 case 0x200 ... MSR_IA32_MC0_CTL2 - 1: 4004 case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff: 4005 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); 4006 case 0xcd: /* fsb frequency */ 4007 msr_info->data = 3; 4008 break; 4009 /* 4010 * MSR_EBC_FREQUENCY_ID 4011 * Conservative value valid for even the basic CPU models. 4012 * Models 0,1: 000 in bits 23:21 indicating a bus speed of 4013 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, 4014 * and 266MHz for model 3, or 4. Set Core Clock 4015 * Frequency to System Bus Frequency Ratio to 1 (bits 4016 * 31:24) even though these are only valid for CPU 4017 * models > 2, however guests may end up dividing or 4018 * multiplying by zero otherwise. 4019 */ 4020 case MSR_EBC_FREQUENCY_ID: 4021 msr_info->data = 1 << 24; 4022 break; 4023 case MSR_IA32_APICBASE: 4024 msr_info->data = kvm_get_apic_base(vcpu); 4025 break; 4026 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: 4027 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); 4028 case MSR_IA32_TSC_DEADLINE: 4029 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); 4030 break; 4031 case MSR_IA32_TSC_ADJUST: 4032 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; 4033 break; 4034 case MSR_IA32_MISC_ENABLE: 4035 msr_info->data = vcpu->arch.ia32_misc_enable_msr; 4036 break; 4037 case MSR_IA32_SMBASE: 4038 if (!msr_info->host_initiated) 4039 return 1; 4040 msr_info->data = vcpu->arch.smbase; 4041 break; 4042 case MSR_SMI_COUNT: 4043 msr_info->data = vcpu->arch.smi_count; 4044 break; 4045 case MSR_IA32_PERF_STATUS: 4046 /* TSC increment by tick */ 4047 msr_info->data = 1000ULL; 4048 /* CPU multiplier */ 4049 msr_info->data |= (((uint64_t)4ULL) << 40); 4050 break; 4051 case MSR_EFER: 4052 msr_info->data = vcpu->arch.efer; 4053 break; 4054 case MSR_KVM_WALL_CLOCK: 4055 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4056 return 1; 4057 4058 msr_info->data = vcpu->kvm->arch.wall_clock; 4059 break; 4060 case MSR_KVM_WALL_CLOCK_NEW: 4061 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4062 return 1; 4063 4064 msr_info->data = vcpu->kvm->arch.wall_clock; 4065 break; 4066 case MSR_KVM_SYSTEM_TIME: 4067 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4068 return 1; 4069 4070 msr_info->data = vcpu->arch.time; 4071 break; 4072 case MSR_KVM_SYSTEM_TIME_NEW: 4073 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4074 return 1; 4075 4076 msr_info->data = vcpu->arch.time; 4077 break; 4078 case MSR_KVM_ASYNC_PF_EN: 4079 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 4080 return 1; 4081 4082 msr_info->data = vcpu->arch.apf.msr_en_val; 4083 break; 4084 case MSR_KVM_ASYNC_PF_INT: 4085 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4086 return 1; 4087 4088 msr_info->data = vcpu->arch.apf.msr_int_val; 4089 break; 4090 case MSR_KVM_ASYNC_PF_ACK: 4091 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4092 return 1; 4093 4094 msr_info->data = 0; 4095 break; 4096 case MSR_KVM_STEAL_TIME: 4097 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 4098 return 1; 4099 4100 msr_info->data = vcpu->arch.st.msr_val; 4101 break; 4102 case MSR_KVM_PV_EOI_EN: 4103 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 4104 return 1; 
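		/*
		 * The stored value is whatever the guest last wrote via
		 * MSR_KVM_PV_EOI_EN: the GPA of its PV EOI byte with the
		 * enable flag in bit 0.
		 */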
		msr_info->data = vcpu->arch.pv_eoi.msr_val;
		break;
	case MSR_KVM_POLL_CONTROL:
		if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
			return 1;

		msr_info->data = vcpu->arch.msr_kvm_poll_control;
		break;
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
	case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
		return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
				   msr_info->host_initiated);
	case MSR_IA32_XSS:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
			return 1;
		msr_info->data = vcpu->arch.ia32_xss;
		break;
	case MSR_K7_CLK_CTL:
		/*
		 * Provide the expected ramp-up count for K7. All other
		 * fields are set to zero, indicating minimum divisors for
		 * every field.
		 *
		 * This prevents guest kernels on AMD hosts with CPU
		 * type 6, model 8 and higher from exploding due to
		 * the rdmsr failing.
		 */
		msr_info->data = 0x20000000;
		break;
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		return kvm_hv_get_msr_common(vcpu,
					     msr_info->index, &msr_info->data,
					     msr_info->host_initiated);
	case MSR_IA32_BBL_CR_CTL3:
		/* This legacy MSR exists but isn't fully documented in current
		 * silicon. It is however accessed by winxp in very narrow
		 * scenarios where it sets bit #19, itself documented as
		 * a "reserved" bit.
Best effort attempt to source coherent 4158 * read data here should the balance of the register be 4159 * interpreted by the guest: 4160 * 4161 * L2 cache control register 3: 64GB range, 256KB size, 4162 * enabled, latency 0x1, configured 4163 */ 4164 msr_info->data = 0xbe702111; 4165 break; 4166 case MSR_AMD64_OSVW_ID_LENGTH: 4167 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4168 return 1; 4169 msr_info->data = vcpu->arch.osvw.length; 4170 break; 4171 case MSR_AMD64_OSVW_STATUS: 4172 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4173 return 1; 4174 msr_info->data = vcpu->arch.osvw.status; 4175 break; 4176 case MSR_PLATFORM_INFO: 4177 if (!msr_info->host_initiated && 4178 !vcpu->kvm->arch.guest_can_read_msr_platform_info) 4179 return 1; 4180 msr_info->data = vcpu->arch.msr_platform_info; 4181 break; 4182 case MSR_MISC_FEATURES_ENABLES: 4183 msr_info->data = vcpu->arch.msr_misc_features_enables; 4184 break; 4185 case MSR_K7_HWCR: 4186 msr_info->data = vcpu->arch.msr_hwcr; 4187 break; 4188 #ifdef CONFIG_X86_64 4189 case MSR_IA32_XFD: 4190 if (!msr_info->host_initiated && 4191 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4192 return 1; 4193 4194 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; 4195 break; 4196 case MSR_IA32_XFD_ERR: 4197 if (!msr_info->host_initiated && 4198 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4199 return 1; 4200 4201 msr_info->data = vcpu->arch.guest_fpu.xfd_err; 4202 break; 4203 #endif 4204 default: 4205 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 4206 return kvm_pmu_get_msr(vcpu, msr_info); 4207 return KVM_MSR_RET_INVALID; 4208 } 4209 return 0; 4210 } 4211 EXPORT_SYMBOL_GPL(kvm_get_msr_common); 4212 4213 /* 4214 * Read or write a bunch of msrs. All parameters are kernel addresses. 4215 * 4216 * @return number of msrs set successfully. 4217 */ 4218 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, 4219 struct kvm_msr_entry *entries, 4220 int (*do_msr)(struct kvm_vcpu *vcpu, 4221 unsigned index, u64 *data)) 4222 { 4223 int i; 4224 4225 for (i = 0; i < msrs->nmsrs; ++i) 4226 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 4227 break; 4228 4229 return i; 4230 } 4231 4232 /* 4233 * Read or write a bunch of msrs. Parameters are user addresses. 4234 * 4235 * @return number of msrs set successfully. 
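 *
 * This is the backend for the KVM_GET_MSRS / KVM_SET_MSRS vcpu ioctls and,
 * with a NULL vcpu, for the system-level KVM_GET_MSRS feature query.
 *
 * Illustration only (hypothetical descriptor name, no error handling): a
 * userspace caller holding a vCPU fd vcpu_fd could read one MSR, e.g.
 * 0x10 (IA32_TSC), roughly like this:
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } buf = {
 *		.hdr.nmsrs  = 1,
 *		.e[0].index = 0x10,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_MSRS, &buf);
 *
 * The ioctl's return value is the count of entries processed, i.e. what
 * __msr_io() above and msr_io() below compute.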
4236 */ 4237 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, 4238 int (*do_msr)(struct kvm_vcpu *vcpu, 4239 unsigned index, u64 *data), 4240 int writeback) 4241 { 4242 struct kvm_msrs msrs; 4243 struct kvm_msr_entry *entries; 4244 int r, n; 4245 unsigned size; 4246 4247 r = -EFAULT; 4248 if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) 4249 goto out; 4250 4251 r = -E2BIG; 4252 if (msrs.nmsrs >= MAX_IO_MSRS) 4253 goto out; 4254 4255 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; 4256 entries = memdup_user(user_msrs->entries, size); 4257 if (IS_ERR(entries)) { 4258 r = PTR_ERR(entries); 4259 goto out; 4260 } 4261 4262 r = n = __msr_io(vcpu, &msrs, entries, do_msr); 4263 if (r < 0) 4264 goto out_free; 4265 4266 r = -EFAULT; 4267 if (writeback && copy_to_user(user_msrs->entries, entries, size)) 4268 goto out_free; 4269 4270 r = n; 4271 4272 out_free: 4273 kfree(entries); 4274 out: 4275 return r; 4276 } 4277 4278 static inline bool kvm_can_mwait_in_guest(void) 4279 { 4280 return boot_cpu_has(X86_FEATURE_MWAIT) && 4281 !boot_cpu_has_bug(X86_BUG_MONITOR) && 4282 boot_cpu_has(X86_FEATURE_ARAT); 4283 } 4284 4285 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, 4286 struct kvm_cpuid2 __user *cpuid_arg) 4287 { 4288 struct kvm_cpuid2 cpuid; 4289 int r; 4290 4291 r = -EFAULT; 4292 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4293 return r; 4294 4295 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); 4296 if (r) 4297 return r; 4298 4299 r = -EFAULT; 4300 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 4301 return r; 4302 4303 return 0; 4304 } 4305 4306 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 4307 { 4308 int r = 0; 4309 4310 switch (ext) { 4311 case KVM_CAP_IRQCHIP: 4312 case KVM_CAP_HLT: 4313 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 4314 case KVM_CAP_SET_TSS_ADDR: 4315 case KVM_CAP_EXT_CPUID: 4316 case KVM_CAP_EXT_EMUL_CPUID: 4317 case KVM_CAP_CLOCKSOURCE: 4318 case KVM_CAP_PIT: 4319 case KVM_CAP_NOP_IO_DELAY: 4320 case KVM_CAP_MP_STATE: 4321 case KVM_CAP_SYNC_MMU: 4322 case KVM_CAP_USER_NMI: 4323 case KVM_CAP_REINJECT_CONTROL: 4324 case KVM_CAP_IRQ_INJECT_STATUS: 4325 case KVM_CAP_IOEVENTFD: 4326 case KVM_CAP_IOEVENTFD_NO_LENGTH: 4327 case KVM_CAP_PIT2: 4328 case KVM_CAP_PIT_STATE2: 4329 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 4330 case KVM_CAP_VCPU_EVENTS: 4331 case KVM_CAP_HYPERV: 4332 case KVM_CAP_HYPERV_VAPIC: 4333 case KVM_CAP_HYPERV_SPIN: 4334 case KVM_CAP_HYPERV_SYNIC: 4335 case KVM_CAP_HYPERV_SYNIC2: 4336 case KVM_CAP_HYPERV_VP_INDEX: 4337 case KVM_CAP_HYPERV_EVENTFD: 4338 case KVM_CAP_HYPERV_TLBFLUSH: 4339 case KVM_CAP_HYPERV_SEND_IPI: 4340 case KVM_CAP_HYPERV_CPUID: 4341 case KVM_CAP_HYPERV_ENFORCE_CPUID: 4342 case KVM_CAP_SYS_HYPERV_CPUID: 4343 case KVM_CAP_PCI_SEGMENT: 4344 case KVM_CAP_DEBUGREGS: 4345 case KVM_CAP_X86_ROBUST_SINGLESTEP: 4346 case KVM_CAP_XSAVE: 4347 case KVM_CAP_ASYNC_PF: 4348 case KVM_CAP_ASYNC_PF_INT: 4349 case KVM_CAP_GET_TSC_KHZ: 4350 case KVM_CAP_KVMCLOCK_CTRL: 4351 case KVM_CAP_READONLY_MEM: 4352 case KVM_CAP_HYPERV_TIME: 4353 case KVM_CAP_IOAPIC_POLARITY_IGNORED: 4354 case KVM_CAP_TSC_DEADLINE_TIMER: 4355 case KVM_CAP_DISABLE_QUIRKS: 4356 case KVM_CAP_SET_BOOT_CPU_ID: 4357 case KVM_CAP_SPLIT_IRQCHIP: 4358 case KVM_CAP_IMMEDIATE_EXIT: 4359 case KVM_CAP_PMU_EVENT_FILTER: 4360 case KVM_CAP_GET_MSR_FEATURES: 4361 case KVM_CAP_MSR_PLATFORM_INFO: 4362 case KVM_CAP_EXCEPTION_PAYLOAD: 4363 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 4364 case KVM_CAP_SET_GUEST_DEBUG: 4365 case KVM_CAP_LAST_CPU: 
4366 case KVM_CAP_X86_USER_SPACE_MSR: 4367 case KVM_CAP_X86_MSR_FILTER: 4368 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 4369 #ifdef CONFIG_X86_SGX_KVM 4370 case KVM_CAP_SGX_ATTRIBUTE: 4371 #endif 4372 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 4373 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 4374 case KVM_CAP_SREGS2: 4375 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 4376 case KVM_CAP_VCPU_ATTRIBUTES: 4377 case KVM_CAP_SYS_ATTRIBUTES: 4378 case KVM_CAP_VAPIC: 4379 case KVM_CAP_ENABLE_CAP: 4380 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 4381 r = 1; 4382 break; 4383 case KVM_CAP_EXIT_HYPERCALL: 4384 r = KVM_EXIT_HYPERCALL_VALID_MASK; 4385 break; 4386 case KVM_CAP_SET_GUEST_DEBUG2: 4387 return KVM_GUESTDBG_VALID_MASK; 4388 #ifdef CONFIG_KVM_XEN 4389 case KVM_CAP_XEN_HVM: 4390 r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | 4391 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | 4392 KVM_XEN_HVM_CONFIG_SHARED_INFO | 4393 KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL | 4394 KVM_XEN_HVM_CONFIG_EVTCHN_SEND; 4395 if (sched_info_on()) 4396 r |= KVM_XEN_HVM_CONFIG_RUNSTATE; 4397 break; 4398 #endif 4399 case KVM_CAP_SYNC_REGS: 4400 r = KVM_SYNC_X86_VALID_FIELDS; 4401 break; 4402 case KVM_CAP_ADJUST_CLOCK: 4403 r = KVM_CLOCK_VALID_FLAGS; 4404 break; 4405 case KVM_CAP_X86_DISABLE_EXITS: 4406 r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE | 4407 KVM_X86_DISABLE_EXITS_CSTATE; 4408 if(kvm_can_mwait_in_guest()) 4409 r |= KVM_X86_DISABLE_EXITS_MWAIT; 4410 break; 4411 case KVM_CAP_X86_SMM: 4412 /* SMBASE is usually relocated above 1M on modern chipsets, 4413 * and SMM handlers might indeed rely on 4G segment limits, 4414 * so do not report SMM to be available if real mode is 4415 * emulated via vm86 mode. Still, do not go to great lengths 4416 * to avoid userspace's usage of the feature, because it is a 4417 * fringe case that is not enabled except via specific settings 4418 * of the module parameters. 4419 */ 4420 r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); 4421 break; 4422 case KVM_CAP_NR_VCPUS: 4423 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); 4424 break; 4425 case KVM_CAP_MAX_VCPUS: 4426 r = KVM_MAX_VCPUS; 4427 break; 4428 case KVM_CAP_MAX_VCPU_ID: 4429 r = KVM_MAX_VCPU_IDS; 4430 break; 4431 case KVM_CAP_PV_MMU: /* obsolete */ 4432 r = 0; 4433 break; 4434 case KVM_CAP_MCE: 4435 r = KVM_MAX_MCE_BANKS; 4436 break; 4437 case KVM_CAP_XCRS: 4438 r = boot_cpu_has(X86_FEATURE_XSAVE); 4439 break; 4440 case KVM_CAP_TSC_CONTROL: 4441 case KVM_CAP_VM_TSC_CONTROL: 4442 r = kvm_caps.has_tsc_control; 4443 break; 4444 case KVM_CAP_X2APIC_API: 4445 r = KVM_X2APIC_API_VALID_FLAGS; 4446 break; 4447 case KVM_CAP_NESTED_STATE: 4448 r = kvm_x86_ops.nested_ops->get_state ? 
4449 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; 4450 break; 4451 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 4452 r = kvm_x86_ops.enable_direct_tlbflush != NULL; 4453 break; 4454 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 4455 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; 4456 break; 4457 case KVM_CAP_SMALLER_MAXPHYADDR: 4458 r = (int) allow_smaller_maxphyaddr; 4459 break; 4460 case KVM_CAP_STEAL_TIME: 4461 r = sched_info_on(); 4462 break; 4463 case KVM_CAP_X86_BUS_LOCK_EXIT: 4464 if (kvm_caps.has_bus_lock_exit) 4465 r = KVM_BUS_LOCK_DETECTION_OFF | 4466 KVM_BUS_LOCK_DETECTION_EXIT; 4467 else 4468 r = 0; 4469 break; 4470 case KVM_CAP_XSAVE2: { 4471 u64 guest_perm = xstate_get_guest_group_perm(); 4472 4473 r = xstate_required_size(kvm_caps.supported_xcr0 & guest_perm, false); 4474 if (r < sizeof(struct kvm_xsave)) 4475 r = sizeof(struct kvm_xsave); 4476 break; 4477 } 4478 case KVM_CAP_PMU_CAPABILITY: 4479 r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0; 4480 break; 4481 case KVM_CAP_DISABLE_QUIRKS2: 4482 r = KVM_X86_VALID_QUIRKS; 4483 break; 4484 case KVM_CAP_X86_NOTIFY_VMEXIT: 4485 r = kvm_caps.has_notify_vmexit; 4486 break; 4487 default: 4488 break; 4489 } 4490 return r; 4491 } 4492 4493 static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr) 4494 { 4495 void __user *uaddr = (void __user*)(unsigned long)attr->addr; 4496 4497 if ((u64)(unsigned long)uaddr != attr->addr) 4498 return ERR_PTR_USR(-EFAULT); 4499 return uaddr; 4500 } 4501 4502 static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr) 4503 { 4504 u64 __user *uaddr = kvm_get_attr_addr(attr); 4505 4506 if (attr->group) 4507 return -ENXIO; 4508 4509 if (IS_ERR(uaddr)) 4510 return PTR_ERR(uaddr); 4511 4512 switch (attr->attr) { 4513 case KVM_X86_XCOMP_GUEST_SUPP: 4514 if (put_user(kvm_caps.supported_xcr0, uaddr)) 4515 return -EFAULT; 4516 return 0; 4517 default: 4518 return -ENXIO; 4519 break; 4520 } 4521 } 4522 4523 static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr) 4524 { 4525 if (attr->group) 4526 return -ENXIO; 4527 4528 switch (attr->attr) { 4529 case KVM_X86_XCOMP_GUEST_SUPP: 4530 return 0; 4531 default: 4532 return -ENXIO; 4533 } 4534 } 4535 4536 long kvm_arch_dev_ioctl(struct file *filp, 4537 unsigned int ioctl, unsigned long arg) 4538 { 4539 void __user *argp = (void __user *)arg; 4540 long r; 4541 4542 switch (ioctl) { 4543 case KVM_GET_MSR_INDEX_LIST: { 4544 struct kvm_msr_list __user *user_msr_list = argp; 4545 struct kvm_msr_list msr_list; 4546 unsigned n; 4547 4548 r = -EFAULT; 4549 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4550 goto out; 4551 n = msr_list.nmsrs; 4552 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; 4553 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4554 goto out; 4555 r = -E2BIG; 4556 if (n < msr_list.nmsrs) 4557 goto out; 4558 r = -EFAULT; 4559 if (copy_to_user(user_msr_list->indices, &msrs_to_save, 4560 num_msrs_to_save * sizeof(u32))) 4561 goto out; 4562 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, 4563 &emulated_msrs, 4564 num_emulated_msrs * sizeof(u32))) 4565 goto out; 4566 r = 0; 4567 break; 4568 } 4569 case KVM_GET_SUPPORTED_CPUID: 4570 case KVM_GET_EMULATED_CPUID: { 4571 struct kvm_cpuid2 __user *cpuid_arg = argp; 4572 struct kvm_cpuid2 cpuid; 4573 4574 r = -EFAULT; 4575 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4576 goto out; 4577 4578 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, 4579 ioctl); 4580 if (r) 4581 goto out; 4582 4583 r = -EFAULT; 4584 if (copy_to_user(cpuid_arg, 
&cpuid, sizeof(cpuid))) 4585 goto out; 4586 r = 0; 4587 break; 4588 } 4589 case KVM_X86_GET_MCE_CAP_SUPPORTED: 4590 r = -EFAULT; 4591 if (copy_to_user(argp, &kvm_caps.supported_mce_cap, 4592 sizeof(kvm_caps.supported_mce_cap))) 4593 goto out; 4594 r = 0; 4595 break; 4596 case KVM_GET_MSR_FEATURE_INDEX_LIST: { 4597 struct kvm_msr_list __user *user_msr_list = argp; 4598 struct kvm_msr_list msr_list; 4599 unsigned int n; 4600 4601 r = -EFAULT; 4602 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4603 goto out; 4604 n = msr_list.nmsrs; 4605 msr_list.nmsrs = num_msr_based_features; 4606 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4607 goto out; 4608 r = -E2BIG; 4609 if (n < msr_list.nmsrs) 4610 goto out; 4611 r = -EFAULT; 4612 if (copy_to_user(user_msr_list->indices, &msr_based_features, 4613 num_msr_based_features * sizeof(u32))) 4614 goto out; 4615 r = 0; 4616 break; 4617 } 4618 case KVM_GET_MSRS: 4619 r = msr_io(NULL, argp, do_get_msr_feature, 1); 4620 break; 4621 case KVM_GET_SUPPORTED_HV_CPUID: 4622 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp); 4623 break; 4624 case KVM_GET_DEVICE_ATTR: { 4625 struct kvm_device_attr attr; 4626 r = -EFAULT; 4627 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4628 break; 4629 r = kvm_x86_dev_get_attr(&attr); 4630 break; 4631 } 4632 case KVM_HAS_DEVICE_ATTR: { 4633 struct kvm_device_attr attr; 4634 r = -EFAULT; 4635 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4636 break; 4637 r = kvm_x86_dev_has_attr(&attr); 4638 break; 4639 } 4640 default: 4641 r = -EINVAL; 4642 break; 4643 } 4644 out: 4645 return r; 4646 } 4647 4648 static void wbinvd_ipi(void *garbage) 4649 { 4650 wbinvd(); 4651 } 4652 4653 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) 4654 { 4655 return kvm_arch_has_noncoherent_dma(vcpu->kvm); 4656 } 4657 4658 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 4659 { 4660 /* Address WBINVD may be executed by guest */ 4661 if (need_emulate_wbinvd(vcpu)) { 4662 if (static_call(kvm_x86_has_wbinvd_exit)()) 4663 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 4664 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) 4665 smp_call_function_single(vcpu->cpu, 4666 wbinvd_ipi, NULL, 1); 4667 } 4668 4669 static_call(kvm_x86_vcpu_load)(vcpu, cpu); 4670 4671 /* Save host pkru register if supported */ 4672 vcpu->arch.host_pkru = read_pkru(); 4673 4674 /* Apply any externally detected TSC adjustments (due to suspend) */ 4675 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { 4676 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); 4677 vcpu->arch.tsc_offset_adjustment = 0; 4678 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 4679 } 4680 4681 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { 4682 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : 4683 rdtsc() - vcpu->arch.last_host_tsc; 4684 if (tsc_delta < 0) 4685 mark_tsc_unstable("KVM discovered backwards TSC"); 4686 4687 if (kvm_check_tsc_unstable()) { 4688 u64 offset = kvm_compute_l1_tsc_offset(vcpu, 4689 vcpu->arch.last_guest_tsc); 4690 kvm_vcpu_write_tsc_offset(vcpu, offset); 4691 vcpu->arch.tsc_catchup = 1; 4692 } 4693 4694 if (kvm_lapic_hv_timer_in_use(vcpu)) 4695 kvm_lapic_restart_hv_timer(vcpu); 4696 4697 /* 4698 * On a host with synchronized TSC, there is no need to update 4699 * kvmclock on vcpu->cpu migration 4700 */ 4701 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) 4702 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 4703 if (vcpu->cpu != cpu) 4704 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); 4705 vcpu->cpu = cpu; 4706 } 4707 4708 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 4709 } 4710 4711 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) 4712 { 4713 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 4714 struct kvm_steal_time __user *st; 4715 struct kvm_memslots *slots; 4716 static const u8 preempted = KVM_VCPU_PREEMPTED; 4717 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; 4718 4719 /* 4720 * The vCPU can be marked preempted if and only if the VM-Exit was on 4721 * an instruction boundary and will not trigger guest emulation of any 4722 * kind (see vcpu_run). Vendor specific code controls (conservatively) 4723 * when this is true, for example allowing the vCPU to be marked 4724 * preempted if and only if the VM-Exit was due to a host interrupt. 4725 */ 4726 if (!vcpu->arch.at_instruction_boundary) { 4727 vcpu->stat.preemption_other++; 4728 return; 4729 } 4730 4731 vcpu->stat.preemption_reported++; 4732 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 4733 return; 4734 4735 if (vcpu->arch.st.preempted) 4736 return; 4737 4738 /* This happens on process exit */ 4739 if (unlikely(current->mm != vcpu->kvm->mm)) 4740 return; 4741 4742 slots = kvm_memslots(vcpu->kvm); 4743 4744 if (unlikely(slots->generation != ghc->generation || 4745 gpa != ghc->gpa || 4746 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) 4747 return; 4748 4749 st = (struct kvm_steal_time __user *)ghc->hva; 4750 BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted)); 4751 4752 if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted))) 4753 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; 4754 4755 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 4756 } 4757 4758 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 4759 { 4760 int idx; 4761 4762 if (vcpu->preempted) { 4763 if (!vcpu->arch.guest_state_protected) 4764 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); 4765 4766 /* 4767 * Take the srcu lock as memslots will be accessed to check the gfn 4768 * cache generation against the memslots generation. 
4769 */ 4770 idx = srcu_read_lock(&vcpu->kvm->srcu); 4771 if (kvm_xen_msr_enabled(vcpu->kvm)) 4772 kvm_xen_runstate_set_preempted(vcpu); 4773 else 4774 kvm_steal_time_set_preempted(vcpu); 4775 srcu_read_unlock(&vcpu->kvm->srcu, idx); 4776 } 4777 4778 static_call(kvm_x86_vcpu_put)(vcpu); 4779 vcpu->arch.last_host_tsc = rdtsc(); 4780 } 4781 4782 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 4783 struct kvm_lapic_state *s) 4784 { 4785 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 4786 4787 return kvm_apic_get_state(vcpu, s); 4788 } 4789 4790 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, 4791 struct kvm_lapic_state *s) 4792 { 4793 int r; 4794 4795 r = kvm_apic_set_state(vcpu, s); 4796 if (r) 4797 return r; 4798 update_cr8_intercept(vcpu); 4799 4800 return 0; 4801 } 4802 4803 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) 4804 { 4805 /* 4806 * We can accept userspace's request for interrupt injection 4807 * as long as we have a place to store the interrupt number. 4808 * The actual injection will happen when the CPU is able to 4809 * deliver the interrupt. 4810 */ 4811 if (kvm_cpu_has_extint(vcpu)) 4812 return false; 4813 4814 /* Acknowledging ExtINT does not happen if LINT0 is masked. */ 4815 return (!lapic_in_kernel(vcpu) || 4816 kvm_apic_accept_pic_intr(vcpu)); 4817 } 4818 4819 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) 4820 { 4821 /* 4822 * Do not cause an interrupt window exit if an exception 4823 * is pending or an event needs reinjection; userspace 4824 * might want to inject the interrupt manually using KVM_SET_REGS 4825 * or KVM_SET_SREGS. For that to work, we must be at an 4826 * instruction boundary and with no events half-injected. 4827 */ 4828 return (kvm_arch_interrupt_allowed(vcpu) && 4829 kvm_cpu_accept_dm_intr(vcpu) && 4830 !kvm_event_needs_reinjection(vcpu) && 4831 !vcpu->arch.exception.pending); 4832 } 4833 4834 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 4835 struct kvm_interrupt *irq) 4836 { 4837 if (irq->irq >= KVM_NR_INTERRUPTS) 4838 return -EINVAL; 4839 4840 if (!irqchip_in_kernel(vcpu->kvm)) { 4841 kvm_queue_interrupt(vcpu, irq->irq, false); 4842 kvm_make_request(KVM_REQ_EVENT, vcpu); 4843 return 0; 4844 } 4845 4846 /* 4847 * With in-kernel LAPIC, we only use this to inject EXTINT, so 4848 * fail for in-kernel 8259. 
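	 * When the 8259 is in the kernel, userspace is instead expected to
	 * raise the line with KVM_IRQ_LINE and let the emulated PIC deliver
	 * the ExtINT itself.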
4849 */ 4850 if (pic_in_kernel(vcpu->kvm)) 4851 return -ENXIO; 4852 4853 if (vcpu->arch.pending_external_vector != -1) 4854 return -EEXIST; 4855 4856 vcpu->arch.pending_external_vector = irq->irq; 4857 kvm_make_request(KVM_REQ_EVENT, vcpu); 4858 return 0; 4859 } 4860 4861 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) 4862 { 4863 kvm_inject_nmi(vcpu); 4864 4865 return 0; 4866 } 4867 4868 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu) 4869 { 4870 kvm_make_request(KVM_REQ_SMI, vcpu); 4871 4872 return 0; 4873 } 4874 4875 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, 4876 struct kvm_tpr_access_ctl *tac) 4877 { 4878 if (tac->flags) 4879 return -EINVAL; 4880 vcpu->arch.tpr_access_reporting = !!tac->enabled; 4881 return 0; 4882 } 4883 4884 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, 4885 u64 mcg_cap) 4886 { 4887 int r; 4888 unsigned bank_num = mcg_cap & 0xff, bank; 4889 4890 r = -EINVAL; 4891 if (!bank_num || bank_num > KVM_MAX_MCE_BANKS) 4892 goto out; 4893 if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000)) 4894 goto out; 4895 r = 0; 4896 vcpu->arch.mcg_cap = mcg_cap; 4897 /* Init IA32_MCG_CTL to all 1s */ 4898 if (mcg_cap & MCG_CTL_P) 4899 vcpu->arch.mcg_ctl = ~(u64)0; 4900 /* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */ 4901 for (bank = 0; bank < bank_num; bank++) { 4902 vcpu->arch.mce_banks[bank*4] = ~(u64)0; 4903 if (mcg_cap & MCG_CMCI_P) 4904 vcpu->arch.mci_ctl2_banks[bank] = 0; 4905 } 4906 4907 kvm_apic_after_set_mcg_cap(vcpu); 4908 4909 static_call(kvm_x86_setup_mce)(vcpu); 4910 out: 4911 return r; 4912 } 4913 4914 /* 4915 * Validate this is an UCNA (uncorrectable no action) error by checking the 4916 * MCG_STATUS and MCi_STATUS registers: 4917 * - none of the bits for Machine Check Exceptions are set 4918 * - both the VAL (valid) and UC (uncorrectable) bits are set 4919 * MCI_STATUS_PCC - Processor Context Corrupted 4920 * MCI_STATUS_S - Signaled as a Machine Check Exception 4921 * MCI_STATUS_AR - Software recoverable Action Required 4922 */ 4923 static bool is_ucna(struct kvm_x86_mce *mce) 4924 { 4925 return !mce->mcg_status && 4926 !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) && 4927 (mce->status & MCI_STATUS_VAL) && 4928 (mce->status & MCI_STATUS_UC); 4929 } 4930 4931 static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks) 4932 { 4933 u64 mcg_cap = vcpu->arch.mcg_cap; 4934 4935 banks[1] = mce->status; 4936 banks[2] = mce->addr; 4937 banks[3] = mce->misc; 4938 vcpu->arch.mcg_status = mce->mcg_status; 4939 4940 if (!(mcg_cap & MCG_CMCI_P) || 4941 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN)) 4942 return 0; 4943 4944 if (lapic_in_kernel(vcpu)) 4945 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI); 4946 4947 return 0; 4948 } 4949 4950 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, 4951 struct kvm_x86_mce *mce) 4952 { 4953 u64 mcg_cap = vcpu->arch.mcg_cap; 4954 unsigned bank_num = mcg_cap & 0xff; 4955 u64 *banks = vcpu->arch.mce_banks; 4956 4957 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) 4958 return -EINVAL; 4959 4960 banks += array_index_nospec(4 * mce->bank, 4 * bank_num); 4961 4962 if (is_ucna(mce)) 4963 return kvm_vcpu_x86_set_ucna(vcpu, mce, banks); 4964 4965 /* 4966 * if IA32_MCG_CTL is not all 1s, the uncorrected error 4967 * reporting is disabled 4968 */ 4969 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && 4970 vcpu->arch.mcg_ctl != ~(u64)0) 4971 return 0; 4972 /* 4973 * if IA32_MCi_CTL 
is not all 1s, the uncorrected error
	 * reporting is disabled for the bank
	 */
	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
		return 0;
	if (mce->status & MCI_STATUS_UC) {
		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
			return 0;
		}
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		vcpu->arch.mcg_status = mce->mcg_status;
		banks[1] = mce->status;
		kvm_queue_exception(vcpu, MC_VECTOR);
	} else if (!(banks[1] & MCI_STATUS_VAL)
		   || !(banks[1] & MCI_STATUS_UC)) {
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		banks[1] = mce->status;
	} else
		banks[1] |= MCI_STATUS_OVER;
	return 0;
}

static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
					       struct kvm_vcpu_events *events)
{
	process_nmi(vcpu);

	if (kvm_check_request(KVM_REQ_SMI, vcpu))
		process_smi(vcpu);

	/*
	 * In guest mode, payload delivery should be deferred,
	 * so that the L1 hypervisor can intercept #PF before
	 * CR2 is modified (or intercept #DB before DR6 is
	 * modified under nVMX). Unless the per-VM capability,
	 * KVM_CAP_EXCEPTION_PAYLOAD, is set, we may not defer the delivery of
	 * an exception payload and handle after a KVM_GET_VCPU_EVENTS. Since we
	 * opportunistically defer the exception payload, deliver it if the
	 * capability hasn't been requested before processing a
	 * KVM_GET_VCPU_EVENTS.
	 */
	if (!vcpu->kvm->arch.exception_payload_enabled &&
	    vcpu->arch.exception.pending && vcpu->arch.exception.has_payload)
		kvm_deliver_exception_payload(vcpu);

	/*
	 * The API doesn't provide the instruction length for software
	 * exceptions, so don't report them. As long as the guest RIP
	 * isn't advanced, we should expect to encounter the exception
	 * again.
	 */
	if (kvm_exception_is_soft(vcpu->arch.exception.nr)) {
		events->exception.injected = 0;
		events->exception.pending = 0;
	} else {
		events->exception.injected = vcpu->arch.exception.injected;
		events->exception.pending = vcpu->arch.exception.pending;
		/*
		 * For ABI compatibility, deliberately conflate
		 * pending and injected exceptions when
		 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
5042 */ 5043 if (!vcpu->kvm->arch.exception_payload_enabled) 5044 events->exception.injected |= 5045 vcpu->arch.exception.pending; 5046 } 5047 events->exception.nr = vcpu->arch.exception.nr; 5048 events->exception.has_error_code = vcpu->arch.exception.has_error_code; 5049 events->exception.error_code = vcpu->arch.exception.error_code; 5050 events->exception_has_payload = vcpu->arch.exception.has_payload; 5051 events->exception_payload = vcpu->arch.exception.payload; 5052 5053 events->interrupt.injected = 5054 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; 5055 events->interrupt.nr = vcpu->arch.interrupt.nr; 5056 events->interrupt.soft = 0; 5057 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 5058 5059 events->nmi.injected = vcpu->arch.nmi_injected; 5060 events->nmi.pending = vcpu->arch.nmi_pending != 0; 5061 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); 5062 events->nmi.pad = 0; 5063 5064 events->sipi_vector = 0; /* never valid when reporting to user space */ 5065 5066 events->smi.smm = is_smm(vcpu); 5067 events->smi.pending = vcpu->arch.smi_pending; 5068 events->smi.smm_inside_nmi = 5069 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); 5070 events->smi.latched_init = kvm_lapic_latched_init(vcpu); 5071 5072 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 5073 | KVM_VCPUEVENT_VALID_SHADOW 5074 | KVM_VCPUEVENT_VALID_SMM); 5075 if (vcpu->kvm->arch.exception_payload_enabled) 5076 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; 5077 if (vcpu->kvm->arch.triple_fault_event) { 5078 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5079 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; 5080 } 5081 5082 memset(&events->reserved, 0, sizeof(events->reserved)); 5083 } 5084 5085 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm); 5086 5087 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 5088 struct kvm_vcpu_events *events) 5089 { 5090 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING 5091 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 5092 | KVM_VCPUEVENT_VALID_SHADOW 5093 | KVM_VCPUEVENT_VALID_SMM 5094 | KVM_VCPUEVENT_VALID_PAYLOAD 5095 | KVM_VCPUEVENT_VALID_TRIPLE_FAULT)) 5096 return -EINVAL; 5097 5098 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { 5099 if (!vcpu->kvm->arch.exception_payload_enabled) 5100 return -EINVAL; 5101 if (events->exception.pending) 5102 events->exception.injected = 0; 5103 else 5104 events->exception_has_payload = 0; 5105 } else { 5106 events->exception.pending = 0; 5107 events->exception_has_payload = 0; 5108 } 5109 5110 if ((events->exception.injected || events->exception.pending) && 5111 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) 5112 return -EINVAL; 5113 5114 /* INITs are latched while in SMM */ 5115 if (events->flags & KVM_VCPUEVENT_VALID_SMM && 5116 (events->smi.smm || events->smi.pending) && 5117 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 5118 return -EINVAL; 5119 5120 process_nmi(vcpu); 5121 vcpu->arch.exception.injected = events->exception.injected; 5122 vcpu->arch.exception.pending = events->exception.pending; 5123 vcpu->arch.exception.nr = events->exception.nr; 5124 vcpu->arch.exception.has_error_code = events->exception.has_error_code; 5125 vcpu->arch.exception.error_code = events->exception.error_code; 5126 vcpu->arch.exception.has_payload = events->exception_has_payload; 5127 vcpu->arch.exception.payload = events->exception_payload; 5128 5129 vcpu->arch.interrupt.injected = events->interrupt.injected; 5130 
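	/*
	 * interrupt.soft distinguishes an emulated INTn from a hardware
	 * interrupt; the interrupt shadow is only restored when userspace
	 * explicitly set KVM_VCPUEVENT_VALID_SHADOW.
	 */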
vcpu->arch.interrupt.nr = events->interrupt.nr; 5131 vcpu->arch.interrupt.soft = events->interrupt.soft; 5132 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) 5133 static_call(kvm_x86_set_interrupt_shadow)(vcpu, 5134 events->interrupt.shadow); 5135 5136 vcpu->arch.nmi_injected = events->nmi.injected; 5137 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) 5138 vcpu->arch.nmi_pending = events->nmi.pending; 5139 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); 5140 5141 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && 5142 lapic_in_kernel(vcpu)) 5143 vcpu->arch.apic->sipi_vector = events->sipi_vector; 5144 5145 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { 5146 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { 5147 kvm_x86_ops.nested_ops->leave_nested(vcpu); 5148 kvm_smm_changed(vcpu, events->smi.smm); 5149 } 5150 5151 vcpu->arch.smi_pending = events->smi.pending; 5152 5153 if (events->smi.smm) { 5154 if (events->smi.smm_inside_nmi) 5155 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 5156 else 5157 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; 5158 } 5159 5160 if (lapic_in_kernel(vcpu)) { 5161 if (events->smi.latched_init) 5162 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5163 else 5164 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5165 } 5166 } 5167 5168 if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) { 5169 if (!vcpu->kvm->arch.triple_fault_event) 5170 return -EINVAL; 5171 if (events->triple_fault.pending) 5172 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5173 else 5174 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5175 } 5176 5177 kvm_make_request(KVM_REQ_EVENT, vcpu); 5178 5179 return 0; 5180 } 5181 5182 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, 5183 struct kvm_debugregs *dbgregs) 5184 { 5185 unsigned long val; 5186 5187 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); 5188 kvm_get_dr(vcpu, 6, &val); 5189 dbgregs->dr6 = val; 5190 dbgregs->dr7 = vcpu->arch.dr7; 5191 dbgregs->flags = 0; 5192 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); 5193 } 5194 5195 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 5196 struct kvm_debugregs *dbgregs) 5197 { 5198 if (dbgregs->flags) 5199 return -EINVAL; 5200 5201 if (!kvm_dr6_valid(dbgregs->dr6)) 5202 return -EINVAL; 5203 if (!kvm_dr7_valid(dbgregs->dr7)) 5204 return -EINVAL; 5205 5206 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); 5207 kvm_update_dr0123(vcpu); 5208 vcpu->arch.dr6 = dbgregs->dr6; 5209 vcpu->arch.dr7 = dbgregs->dr7; 5210 kvm_update_dr7(vcpu); 5211 5212 return 0; 5213 } 5214 5215 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, 5216 struct kvm_xsave *guest_xsave) 5217 { 5218 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5219 return; 5220 5221 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5222 guest_xsave->region, 5223 sizeof(guest_xsave->region), 5224 vcpu->arch.pkru); 5225 } 5226 5227 static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu, 5228 u8 *state, unsigned int size) 5229 { 5230 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5231 return; 5232 5233 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5234 state, size, vcpu->arch.pkru); 5235 } 5236 5237 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 5238 struct kvm_xsave *guest_xsave) 5239 { 5240 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5241 return 0; 5242 5243 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, 5244 guest_xsave->region, 5245 
kvm_caps.supported_xcr0, 5246 &vcpu->arch.pkru); 5247 } 5248 5249 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, 5250 struct kvm_xcrs *guest_xcrs) 5251 { 5252 if (!boot_cpu_has(X86_FEATURE_XSAVE)) { 5253 guest_xcrs->nr_xcrs = 0; 5254 return; 5255 } 5256 5257 guest_xcrs->nr_xcrs = 1; 5258 guest_xcrs->flags = 0; 5259 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; 5260 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; 5261 } 5262 5263 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, 5264 struct kvm_xcrs *guest_xcrs) 5265 { 5266 int i, r = 0; 5267 5268 if (!boot_cpu_has(X86_FEATURE_XSAVE)) 5269 return -EINVAL; 5270 5271 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) 5272 return -EINVAL; 5273 5274 for (i = 0; i < guest_xcrs->nr_xcrs; i++) 5275 /* Only support XCR0 currently */ 5276 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { 5277 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, 5278 guest_xcrs->xcrs[i].value); 5279 break; 5280 } 5281 if (r) 5282 r = -EINVAL; 5283 return r; 5284 } 5285 5286 /* 5287 * kvm_set_guest_paused() indicates to the guest kernel that it has been 5288 * stopped by the hypervisor. This function will be called from the host only. 5289 * EINVAL is returned when the host attempts to set the flag for a guest that 5290 * does not support pv clocks. 5291 */ 5292 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 5293 { 5294 if (!vcpu->arch.pv_time.active) 5295 return -EINVAL; 5296 vcpu->arch.pvclock_set_guest_stopped_request = true; 5297 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 5298 return 0; 5299 } 5300 5301 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu, 5302 struct kvm_device_attr *attr) 5303 { 5304 int r; 5305 5306 switch (attr->attr) { 5307 case KVM_VCPU_TSC_OFFSET: 5308 r = 0; 5309 break; 5310 default: 5311 r = -ENXIO; 5312 } 5313 5314 return r; 5315 } 5316 5317 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu, 5318 struct kvm_device_attr *attr) 5319 { 5320 u64 __user *uaddr = kvm_get_attr_addr(attr); 5321 int r; 5322 5323 if (IS_ERR(uaddr)) 5324 return PTR_ERR(uaddr); 5325 5326 switch (attr->attr) { 5327 case KVM_VCPU_TSC_OFFSET: 5328 r = -EFAULT; 5329 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) 5330 break; 5331 r = 0; 5332 break; 5333 default: 5334 r = -ENXIO; 5335 } 5336 5337 return r; 5338 } 5339 5340 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, 5341 struct kvm_device_attr *attr) 5342 { 5343 u64 __user *uaddr = kvm_get_attr_addr(attr); 5344 struct kvm *kvm = vcpu->kvm; 5345 int r; 5346 5347 if (IS_ERR(uaddr)) 5348 return PTR_ERR(uaddr); 5349 5350 switch (attr->attr) { 5351 case KVM_VCPU_TSC_OFFSET: { 5352 u64 offset, tsc, ns; 5353 unsigned long flags; 5354 bool matched; 5355 5356 r = -EFAULT; 5357 if (get_user(offset, uaddr)) 5358 break; 5359 5360 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 5361 5362 matched = (vcpu->arch.virtual_tsc_khz && 5363 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && 5364 kvm->arch.last_tsc_offset == offset); 5365 5366 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; 5367 ns = get_kvmclock_base_ns(); 5368 5369 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched); 5370 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 5371 5372 r = 0; 5373 break; 5374 } 5375 default: 5376 r = -ENXIO; 5377 } 5378 5379 return r; 5380 } 5381 5382 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu, 5383 unsigned int ioctl, 5384 void __user *argp) 5385 { 5386 struct kvm_device_attr attr; 5387 int r; 5388 
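	/*
	 * Only the KVM_VCPU_TSC_CTRL attribute group exists for vCPUs at the
	 * moment; anything else is rejected with -ENXIO once the attr struct
	 * has been copied in.
	 */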
5389 if (copy_from_user(&attr, argp, sizeof(attr))) 5390 return -EFAULT; 5391 5392 if (attr.group != KVM_VCPU_TSC_CTRL) 5393 return -ENXIO; 5394 5395 switch (ioctl) { 5396 case KVM_HAS_DEVICE_ATTR: 5397 r = kvm_arch_tsc_has_attr(vcpu, &attr); 5398 break; 5399 case KVM_GET_DEVICE_ATTR: 5400 r = kvm_arch_tsc_get_attr(vcpu, &attr); 5401 break; 5402 case KVM_SET_DEVICE_ATTR: 5403 r = kvm_arch_tsc_set_attr(vcpu, &attr); 5404 break; 5405 } 5406 5407 return r; 5408 } 5409 5410 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 5411 struct kvm_enable_cap *cap) 5412 { 5413 int r; 5414 uint16_t vmcs_version; 5415 void __user *user_ptr; 5416 5417 if (cap->flags) 5418 return -EINVAL; 5419 5420 switch (cap->cap) { 5421 case KVM_CAP_HYPERV_SYNIC2: 5422 if (cap->args[0]) 5423 return -EINVAL; 5424 fallthrough; 5425 5426 case KVM_CAP_HYPERV_SYNIC: 5427 if (!irqchip_in_kernel(vcpu->kvm)) 5428 return -EINVAL; 5429 return kvm_hv_activate_synic(vcpu, cap->cap == 5430 KVM_CAP_HYPERV_SYNIC2); 5431 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 5432 if (!kvm_x86_ops.nested_ops->enable_evmcs) 5433 return -ENOTTY; 5434 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); 5435 if (!r) { 5436 user_ptr = (void __user *)(uintptr_t)cap->args[0]; 5437 if (copy_to_user(user_ptr, &vmcs_version, 5438 sizeof(vmcs_version))) 5439 r = -EFAULT; 5440 } 5441 return r; 5442 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 5443 if (!kvm_x86_ops.enable_direct_tlbflush) 5444 return -ENOTTY; 5445 5446 return static_call(kvm_x86_enable_direct_tlbflush)(vcpu); 5447 5448 case KVM_CAP_HYPERV_ENFORCE_CPUID: 5449 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); 5450 5451 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 5452 vcpu->arch.pv_cpuid.enforce = cap->args[0]; 5453 if (vcpu->arch.pv_cpuid.enforce) 5454 kvm_update_pv_runtime(vcpu); 5455 5456 return 0; 5457 default: 5458 return -EINVAL; 5459 } 5460 } 5461 5462 long kvm_arch_vcpu_ioctl(struct file *filp, 5463 unsigned int ioctl, unsigned long arg) 5464 { 5465 struct kvm_vcpu *vcpu = filp->private_data; 5466 void __user *argp = (void __user *)arg; 5467 int r; 5468 union { 5469 struct kvm_sregs2 *sregs2; 5470 struct kvm_lapic_state *lapic; 5471 struct kvm_xsave *xsave; 5472 struct kvm_xcrs *xcrs; 5473 void *buffer; 5474 } u; 5475 5476 vcpu_load(vcpu); 5477 5478 u.buffer = NULL; 5479 switch (ioctl) { 5480 case KVM_GET_LAPIC: { 5481 r = -EINVAL; 5482 if (!lapic_in_kernel(vcpu)) 5483 goto out; 5484 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), 5485 GFP_KERNEL_ACCOUNT); 5486 5487 r = -ENOMEM; 5488 if (!u.lapic) 5489 goto out; 5490 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); 5491 if (r) 5492 goto out; 5493 r = -EFAULT; 5494 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) 5495 goto out; 5496 r = 0; 5497 break; 5498 } 5499 case KVM_SET_LAPIC: { 5500 r = -EINVAL; 5501 if (!lapic_in_kernel(vcpu)) 5502 goto out; 5503 u.lapic = memdup_user(argp, sizeof(*u.lapic)); 5504 if (IS_ERR(u.lapic)) { 5505 r = PTR_ERR(u.lapic); 5506 goto out_nofree; 5507 } 5508 5509 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); 5510 break; 5511 } 5512 case KVM_INTERRUPT: { 5513 struct kvm_interrupt irq; 5514 5515 r = -EFAULT; 5516 if (copy_from_user(&irq, argp, sizeof(irq))) 5517 goto out; 5518 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 5519 break; 5520 } 5521 case KVM_NMI: { 5522 r = kvm_vcpu_ioctl_nmi(vcpu); 5523 break; 5524 } 5525 case KVM_SMI: { 5526 r = kvm_vcpu_ioctl_smi(vcpu); 5527 break; 5528 } 5529 case KVM_SET_CPUID: { 5530 struct kvm_cpuid __user *cpuid_arg = argp; 5531 struct kvm_cpuid cpuid; 5532 5533 
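		/*
		 * KVM_SET_CPUID is the legacy variant taking struct kvm_cpuid
		 * with kvm_cpuid_entry entries; KVM_SET_CPUID2 below uses the
		 * newer kvm_cpuid_entry2 layout.
		 */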
r = -EFAULT; 5534 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5535 goto out; 5536 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); 5537 break; 5538 } 5539 case KVM_SET_CPUID2: { 5540 struct kvm_cpuid2 __user *cpuid_arg = argp; 5541 struct kvm_cpuid2 cpuid; 5542 5543 r = -EFAULT; 5544 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5545 goto out; 5546 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 5547 cpuid_arg->entries); 5548 break; 5549 } 5550 case KVM_GET_CPUID2: { 5551 struct kvm_cpuid2 __user *cpuid_arg = argp; 5552 struct kvm_cpuid2 cpuid; 5553 5554 r = -EFAULT; 5555 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5556 goto out; 5557 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 5558 cpuid_arg->entries); 5559 if (r) 5560 goto out; 5561 r = -EFAULT; 5562 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 5563 goto out; 5564 r = 0; 5565 break; 5566 } 5567 case KVM_GET_MSRS: { 5568 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5569 r = msr_io(vcpu, argp, do_get_msr, 1); 5570 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5571 break; 5572 } 5573 case KVM_SET_MSRS: { 5574 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5575 r = msr_io(vcpu, argp, do_set_msr, 0); 5576 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5577 break; 5578 } 5579 case KVM_TPR_ACCESS_REPORTING: { 5580 struct kvm_tpr_access_ctl tac; 5581 5582 r = -EFAULT; 5583 if (copy_from_user(&tac, argp, sizeof(tac))) 5584 goto out; 5585 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 5586 if (r) 5587 goto out; 5588 r = -EFAULT; 5589 if (copy_to_user(argp, &tac, sizeof(tac))) 5590 goto out; 5591 r = 0; 5592 break; 5593 }; 5594 case KVM_SET_VAPIC_ADDR: { 5595 struct kvm_vapic_addr va; 5596 int idx; 5597 5598 r = -EINVAL; 5599 if (!lapic_in_kernel(vcpu)) 5600 goto out; 5601 r = -EFAULT; 5602 if (copy_from_user(&va, argp, sizeof(va))) 5603 goto out; 5604 idx = srcu_read_lock(&vcpu->kvm->srcu); 5605 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 5606 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5607 break; 5608 } 5609 case KVM_X86_SETUP_MCE: { 5610 u64 mcg_cap; 5611 5612 r = -EFAULT; 5613 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap))) 5614 goto out; 5615 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 5616 break; 5617 } 5618 case KVM_X86_SET_MCE: { 5619 struct kvm_x86_mce mce; 5620 5621 r = -EFAULT; 5622 if (copy_from_user(&mce, argp, sizeof(mce))) 5623 goto out; 5624 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 5625 break; 5626 } 5627 case KVM_GET_VCPU_EVENTS: { 5628 struct kvm_vcpu_events events; 5629 5630 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); 5631 5632 r = -EFAULT; 5633 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) 5634 break; 5635 r = 0; 5636 break; 5637 } 5638 case KVM_SET_VCPU_EVENTS: { 5639 struct kvm_vcpu_events events; 5640 5641 r = -EFAULT; 5642 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) 5643 break; 5644 5645 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); 5646 break; 5647 } 5648 case KVM_GET_DEBUGREGS: { 5649 struct kvm_debugregs dbgregs; 5650 5651 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); 5652 5653 r = -EFAULT; 5654 if (copy_to_user(argp, &dbgregs, 5655 sizeof(struct kvm_debugregs))) 5656 break; 5657 r = 0; 5658 break; 5659 } 5660 case KVM_SET_DEBUGREGS: { 5661 struct kvm_debugregs dbgregs; 5662 5663 r = -EFAULT; 5664 if (copy_from_user(&dbgregs, argp, 5665 sizeof(struct kvm_debugregs))) 5666 break; 5667 5668 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); 5669 break; 5670 } 5671 case KVM_GET_XSAVE: { 5672 r = -EINVAL; 5673 
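		/*
		 * The legacy KVM_GET_XSAVE ioctl is limited to the fixed-size
		 * struct kvm_xsave; guests whose FPU state exceeds it (e.g.
		 * when dynamic features such as AMX are enabled) must be read
		 * with KVM_GET_XSAVE2 instead.
		 */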
if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) 5674 break; 5675 5676 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT); 5677 r = -ENOMEM; 5678 if (!u.xsave) 5679 break; 5680 5681 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); 5682 5683 r = -EFAULT; 5684 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) 5685 break; 5686 r = 0; 5687 break; 5688 } 5689 case KVM_SET_XSAVE: { 5690 int size = vcpu->arch.guest_fpu.uabi_size; 5691 5692 u.xsave = memdup_user(argp, size); 5693 if (IS_ERR(u.xsave)) { 5694 r = PTR_ERR(u.xsave); 5695 goto out_nofree; 5696 } 5697 5698 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); 5699 break; 5700 } 5701 5702 case KVM_GET_XSAVE2: { 5703 int size = vcpu->arch.guest_fpu.uabi_size; 5704 5705 u.xsave = kzalloc(size, GFP_KERNEL_ACCOUNT); 5706 r = -ENOMEM; 5707 if (!u.xsave) 5708 break; 5709 5710 kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size); 5711 5712 r = -EFAULT; 5713 if (copy_to_user(argp, u.xsave, size)) 5714 break; 5715 5716 r = 0; 5717 break; 5718 } 5719 5720 case KVM_GET_XCRS: { 5721 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT); 5722 r = -ENOMEM; 5723 if (!u.xcrs) 5724 break; 5725 5726 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); 5727 5728 r = -EFAULT; 5729 if (copy_to_user(argp, u.xcrs, 5730 sizeof(struct kvm_xcrs))) 5731 break; 5732 r = 0; 5733 break; 5734 } 5735 case KVM_SET_XCRS: { 5736 u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); 5737 if (IS_ERR(u.xcrs)) { 5738 r = PTR_ERR(u.xcrs); 5739 goto out_nofree; 5740 } 5741 5742 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); 5743 break; 5744 } 5745 case KVM_SET_TSC_KHZ: { 5746 u32 user_tsc_khz; 5747 5748 r = -EINVAL; 5749 user_tsc_khz = (u32)arg; 5750 5751 if (kvm_caps.has_tsc_control && 5752 user_tsc_khz >= kvm_caps.max_guest_tsc_khz) 5753 goto out; 5754 5755 if (user_tsc_khz == 0) 5756 user_tsc_khz = tsc_khz; 5757 5758 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) 5759 r = 0; 5760 5761 goto out; 5762 } 5763 case KVM_GET_TSC_KHZ: { 5764 r = vcpu->arch.virtual_tsc_khz; 5765 goto out; 5766 } 5767 case KVM_KVMCLOCK_CTRL: { 5768 r = kvm_set_guest_paused(vcpu); 5769 goto out; 5770 } 5771 case KVM_ENABLE_CAP: { 5772 struct kvm_enable_cap cap; 5773 5774 r = -EFAULT; 5775 if (copy_from_user(&cap, argp, sizeof(cap))) 5776 goto out; 5777 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 5778 break; 5779 } 5780 case KVM_GET_NESTED_STATE: { 5781 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5782 u32 user_data_size; 5783 5784 r = -EINVAL; 5785 if (!kvm_x86_ops.nested_ops->get_state) 5786 break; 5787 5788 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); 5789 r = -EFAULT; 5790 if (get_user(user_data_size, &user_kvm_nested_state->size)) 5791 break; 5792 5793 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, 5794 user_data_size); 5795 if (r < 0) 5796 break; 5797 5798 if (r > user_data_size) { 5799 if (put_user(r, &user_kvm_nested_state->size)) 5800 r = -EFAULT; 5801 else 5802 r = -E2BIG; 5803 break; 5804 } 5805 5806 r = 0; 5807 break; 5808 } 5809 case KVM_SET_NESTED_STATE: { 5810 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5811 struct kvm_nested_state kvm_state; 5812 int idx; 5813 5814 r = -EINVAL; 5815 if (!kvm_x86_ops.nested_ops->set_state) 5816 break; 5817 5818 r = -EFAULT; 5819 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) 5820 break; 5821 5822 r = -EINVAL; 5823 if (kvm_state.size < sizeof(kvm_state)) 5824 break; 5825 5826 if (kvm_state.flags & 5827 ~(KVM_STATE_NESTED_RUN_PENDING | 
KVM_STATE_NESTED_GUEST_MODE 5828 | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING 5829 | KVM_STATE_NESTED_GIF_SET)) 5830 break; 5831 5832 /* nested_run_pending implies guest_mode. */ 5833 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING) 5834 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) 5835 break; 5836 5837 idx = srcu_read_lock(&vcpu->kvm->srcu); 5838 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); 5839 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5840 break; 5841 } 5842 case KVM_GET_SUPPORTED_HV_CPUID: 5843 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); 5844 break; 5845 #ifdef CONFIG_KVM_XEN 5846 case KVM_XEN_VCPU_GET_ATTR: { 5847 struct kvm_xen_vcpu_attr xva; 5848 5849 r = -EFAULT; 5850 if (copy_from_user(&xva, argp, sizeof(xva))) 5851 goto out; 5852 r = kvm_xen_vcpu_get_attr(vcpu, &xva); 5853 if (!r && copy_to_user(argp, &xva, sizeof(xva))) 5854 r = -EFAULT; 5855 break; 5856 } 5857 case KVM_XEN_VCPU_SET_ATTR: { 5858 struct kvm_xen_vcpu_attr xva; 5859 5860 r = -EFAULT; 5861 if (copy_from_user(&xva, argp, sizeof(xva))) 5862 goto out; 5863 r = kvm_xen_vcpu_set_attr(vcpu, &xva); 5864 break; 5865 } 5866 #endif 5867 case KVM_GET_SREGS2: { 5868 u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL); 5869 r = -ENOMEM; 5870 if (!u.sregs2) 5871 goto out; 5872 __get_sregs2(vcpu, u.sregs2); 5873 r = -EFAULT; 5874 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2))) 5875 goto out; 5876 r = 0; 5877 break; 5878 } 5879 case KVM_SET_SREGS2: { 5880 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2)); 5881 if (IS_ERR(u.sregs2)) { 5882 r = PTR_ERR(u.sregs2); 5883 u.sregs2 = NULL; 5884 goto out; 5885 } 5886 r = __set_sregs2(vcpu, u.sregs2); 5887 break; 5888 } 5889 case KVM_HAS_DEVICE_ATTR: 5890 case KVM_GET_DEVICE_ATTR: 5891 case KVM_SET_DEVICE_ATTR: 5892 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp); 5893 break; 5894 default: 5895 r = -EINVAL; 5896 } 5897 out: 5898 kfree(u.buffer); 5899 out_nofree: 5900 vcpu_put(vcpu); 5901 return r; 5902 } 5903 5904 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 5905 { 5906 return VM_FAULT_SIGBUS; 5907 } 5908 5909 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) 5910 { 5911 int ret; 5912 5913 if (addr > (unsigned int)(-3 * PAGE_SIZE)) 5914 return -EINVAL; 5915 ret = static_call(kvm_x86_set_tss_addr)(kvm, addr); 5916 return ret; 5917 } 5918 5919 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, 5920 u64 ident_addr) 5921 { 5922 return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr); 5923 } 5924 5925 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, 5926 unsigned long kvm_nr_mmu_pages) 5927 { 5928 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) 5929 return -EINVAL; 5930 5931 mutex_lock(&kvm->slots_lock); 5932 5933 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); 5934 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; 5935 5936 mutex_unlock(&kvm->slots_lock); 5937 return 0; 5938 } 5939 5940 static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) 5941 { 5942 return kvm->arch.n_max_mmu_pages; 5943 } 5944 5945 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 5946 { 5947 struct kvm_pic *pic = kvm->arch.vpic; 5948 int r; 5949 5950 r = 0; 5951 switch (chip->chip_id) { 5952 case KVM_IRQCHIP_PIC_MASTER: 5953 memcpy(&chip->chip.pic, &pic->pics[0], 5954 sizeof(struct kvm_pic_state)); 5955 break; 5956 case KVM_IRQCHIP_PIC_SLAVE: 5957 memcpy(&chip->chip.pic, &pic->pics[1], 5958 sizeof(struct 
kvm_pic_state)); 5959 break; 5960 case KVM_IRQCHIP_IOAPIC: 5961 kvm_get_ioapic(kvm, &chip->chip.ioapic); 5962 break; 5963 default: 5964 r = -EINVAL; 5965 break; 5966 } 5967 return r; 5968 } 5969 5970 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 5971 { 5972 struct kvm_pic *pic = kvm->arch.vpic; 5973 int r; 5974 5975 r = 0; 5976 switch (chip->chip_id) { 5977 case KVM_IRQCHIP_PIC_MASTER: 5978 spin_lock(&pic->lock); 5979 memcpy(&pic->pics[0], &chip->chip.pic, 5980 sizeof(struct kvm_pic_state)); 5981 spin_unlock(&pic->lock); 5982 break; 5983 case KVM_IRQCHIP_PIC_SLAVE: 5984 spin_lock(&pic->lock); 5985 memcpy(&pic->pics[1], &chip->chip.pic, 5986 sizeof(struct kvm_pic_state)); 5987 spin_unlock(&pic->lock); 5988 break; 5989 case KVM_IRQCHIP_IOAPIC: 5990 kvm_set_ioapic(kvm, &chip->chip.ioapic); 5991 break; 5992 default: 5993 r = -EINVAL; 5994 break; 5995 } 5996 kvm_pic_update_irq(pic); 5997 return r; 5998 } 5999 6000 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) 6001 { 6002 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; 6003 6004 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); 6005 6006 mutex_lock(&kps->lock); 6007 memcpy(ps, &kps->channels, sizeof(*ps)); 6008 mutex_unlock(&kps->lock); 6009 return 0; 6010 } 6011 6012 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 6013 { 6014 int i; 6015 struct kvm_pit *pit = kvm->arch.vpit; 6016 6017 mutex_lock(&pit->pit_state.lock); 6018 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); 6019 for (i = 0; i < 3; i++) 6020 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); 6021 mutex_unlock(&pit->pit_state.lock); 6022 return 0; 6023 } 6024 6025 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 6026 { 6027 mutex_lock(&kvm->arch.vpit->pit_state.lock); 6028 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, 6029 sizeof(ps->channels)); 6030 ps->flags = kvm->arch.vpit->pit_state.flags; 6031 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 6032 memset(&ps->reserved, 0, sizeof(ps->reserved)); 6033 return 0; 6034 } 6035 6036 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 6037 { 6038 int start = 0; 6039 int i; 6040 u32 prev_legacy, cur_legacy; 6041 struct kvm_pit *pit = kvm->arch.vpit; 6042 6043 mutex_lock(&pit->pit_state.lock); 6044 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 6045 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; 6046 if (!prev_legacy && cur_legacy) 6047 start = 1; 6048 memcpy(&pit->pit_state.channels, &ps->channels, 6049 sizeof(pit->pit_state.channels)); 6050 pit->pit_state.flags = ps->flags; 6051 for (i = 0; i < 3; i++) 6052 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, 6053 start && i == 0); 6054 mutex_unlock(&pit->pit_state.lock); 6055 return 0; 6056 } 6057 6058 static int kvm_vm_ioctl_reinject(struct kvm *kvm, 6059 struct kvm_reinject_control *control) 6060 { 6061 struct kvm_pit *pit = kvm->arch.vpit; 6062 6063 /* pit->pit_state.lock was overloaded to prevent userspace from getting 6064 * an inconsistent state after running multiple KVM_REINJECT_CONTROL 6065 * ioctls in parallel. Use a separate lock if that ioctl isn't rare. 
6066 */ 6067 mutex_lock(&pit->pit_state.lock); 6068 kvm_pit_set_reinject(pit, control->pit_reinject); 6069 mutex_unlock(&pit->pit_state.lock); 6070 6071 return 0; 6072 } 6073 6074 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) 6075 { 6076 6077 /* 6078 * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called 6079 * before reporting dirty_bitmap to userspace. KVM flushes the buffers 6080 * on all VM-Exits, thus we only need to kick running vCPUs to force a 6081 * VM-Exit. 6082 */ 6083 struct kvm_vcpu *vcpu; 6084 unsigned long i; 6085 6086 kvm_for_each_vcpu(i, vcpu, kvm) 6087 kvm_vcpu_kick(vcpu); 6088 } 6089 6090 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, 6091 bool line_status) 6092 { 6093 if (!irqchip_in_kernel(kvm)) 6094 return -ENXIO; 6095 6096 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 6097 irq_event->irq, irq_event->level, 6098 line_status); 6099 return 0; 6100 } 6101 6102 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, 6103 struct kvm_enable_cap *cap) 6104 { 6105 int r; 6106 6107 if (cap->flags) 6108 return -EINVAL; 6109 6110 switch (cap->cap) { 6111 case KVM_CAP_DISABLE_QUIRKS2: 6112 r = -EINVAL; 6113 if (cap->args[0] & ~KVM_X86_VALID_QUIRKS) 6114 break; 6115 fallthrough; 6116 case KVM_CAP_DISABLE_QUIRKS: 6117 kvm->arch.disabled_quirks = cap->args[0]; 6118 r = 0; 6119 break; 6120 case KVM_CAP_SPLIT_IRQCHIP: { 6121 mutex_lock(&kvm->lock); 6122 r = -EINVAL; 6123 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) 6124 goto split_irqchip_unlock; 6125 r = -EEXIST; 6126 if (irqchip_in_kernel(kvm)) 6127 goto split_irqchip_unlock; 6128 if (kvm->created_vcpus) 6129 goto split_irqchip_unlock; 6130 r = kvm_setup_empty_irq_routing(kvm); 6131 if (r) 6132 goto split_irqchip_unlock; 6133 /* Pairs with irqchip_in_kernel. 
*/ 6134 smp_wmb(); 6135 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; 6136 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; 6137 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6138 r = 0; 6139 split_irqchip_unlock: 6140 mutex_unlock(&kvm->lock); 6141 break; 6142 } 6143 case KVM_CAP_X2APIC_API: 6144 r = -EINVAL; 6145 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) 6146 break; 6147 6148 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) 6149 kvm->arch.x2apic_format = true; 6150 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) 6151 kvm->arch.x2apic_broadcast_quirk_disabled = true; 6152 6153 r = 0; 6154 break; 6155 case KVM_CAP_X86_DISABLE_EXITS: 6156 r = -EINVAL; 6157 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) 6158 break; 6159 6160 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && 6161 kvm_can_mwait_in_guest()) 6162 kvm->arch.mwait_in_guest = true; 6163 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) 6164 kvm->arch.hlt_in_guest = true; 6165 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) 6166 kvm->arch.pause_in_guest = true; 6167 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) 6168 kvm->arch.cstate_in_guest = true; 6169 r = 0; 6170 break; 6171 case KVM_CAP_MSR_PLATFORM_INFO: 6172 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; 6173 r = 0; 6174 break; 6175 case KVM_CAP_EXCEPTION_PAYLOAD: 6176 kvm->arch.exception_payload_enabled = cap->args[0]; 6177 r = 0; 6178 break; 6179 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 6180 kvm->arch.triple_fault_event = cap->args[0]; 6181 r = 0; 6182 break; 6183 case KVM_CAP_X86_USER_SPACE_MSR: 6184 r = -EINVAL; 6185 if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL | 6186 KVM_MSR_EXIT_REASON_UNKNOWN | 6187 KVM_MSR_EXIT_REASON_FILTER)) 6188 break; 6189 kvm->arch.user_space_msr_mask = cap->args[0]; 6190 r = 0; 6191 break; 6192 case KVM_CAP_X86_BUS_LOCK_EXIT: 6193 r = -EINVAL; 6194 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) 6195 break; 6196 6197 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && 6198 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) 6199 break; 6200 6201 if (kvm_caps.has_bus_lock_exit && 6202 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) 6203 kvm->arch.bus_lock_detection_enabled = true; 6204 r = 0; 6205 break; 6206 #ifdef CONFIG_X86_SGX_KVM 6207 case KVM_CAP_SGX_ATTRIBUTE: { 6208 unsigned long allowed_attributes = 0; 6209 6210 r = sgx_set_attribute(&allowed_attributes, cap->args[0]); 6211 if (r) 6212 break; 6213 6214 /* KVM only supports the PROVISIONKEY privileged attribute. 
*/ 6215 if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) && 6216 !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY)) 6217 kvm->arch.sgx_provisioning_allowed = true; 6218 else 6219 r = -EINVAL; 6220 break; 6221 } 6222 #endif 6223 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 6224 r = -EINVAL; 6225 if (!kvm_x86_ops.vm_copy_enc_context_from) 6226 break; 6227 6228 r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]); 6229 break; 6230 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 6231 r = -EINVAL; 6232 if (!kvm_x86_ops.vm_move_enc_context_from) 6233 break; 6234 6235 r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]); 6236 break; 6237 case KVM_CAP_EXIT_HYPERCALL: 6238 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) { 6239 r = -EINVAL; 6240 break; 6241 } 6242 kvm->arch.hypercall_exit_enabled = cap->args[0]; 6243 r = 0; 6244 break; 6245 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 6246 r = -EINVAL; 6247 if (cap->args[0] & ~1) 6248 break; 6249 kvm->arch.exit_on_emulation_error = cap->args[0]; 6250 r = 0; 6251 break; 6252 case KVM_CAP_PMU_CAPABILITY: 6253 r = -EINVAL; 6254 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK)) 6255 break; 6256 6257 mutex_lock(&kvm->lock); 6258 if (!kvm->created_vcpus) { 6259 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE); 6260 r = 0; 6261 } 6262 mutex_unlock(&kvm->lock); 6263 break; 6264 case KVM_CAP_MAX_VCPU_ID: 6265 r = -EINVAL; 6266 if (cap->args[0] > KVM_MAX_VCPU_IDS) 6267 break; 6268 6269 mutex_lock(&kvm->lock); 6270 if (kvm->arch.max_vcpu_ids == cap->args[0]) { 6271 r = 0; 6272 } else if (!kvm->arch.max_vcpu_ids) { 6273 kvm->arch.max_vcpu_ids = cap->args[0]; 6274 r = 0; 6275 } 6276 mutex_unlock(&kvm->lock); 6277 break; 6278 case KVM_CAP_X86_NOTIFY_VMEXIT: 6279 r = -EINVAL; 6280 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS) 6281 break; 6282 if (!kvm_caps.has_notify_vmexit) 6283 break; 6284 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED)) 6285 break; 6286 mutex_lock(&kvm->lock); 6287 if (!kvm->created_vcpus) { 6288 kvm->arch.notify_window = cap->args[0] >> 32; 6289 kvm->arch.notify_vmexit_flags = (u32)cap->args[0]; 6290 r = 0; 6291 } 6292 mutex_unlock(&kvm->lock); 6293 break; 6294 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 6295 r = -EINVAL; 6296 6297 /* 6298 * Since the risk of disabling NX hugepages is a guest crashing 6299 * the system, ensure the userspace process has permission to 6300 * reboot the system. 6301 * 6302 * Note that unlike the reboot() syscall, the process must have 6303 * this capability in the root namespace because exposing 6304 * /dev/kvm into a container does not limit the scope of the 6305 * iTLB multihit bug to that container. In other words, 6306 * this must use capable(), not ns_capable(). 
6307 */ 6308 if (!capable(CAP_SYS_BOOT)) { 6309 r = -EPERM; 6310 break; 6311 } 6312 6313 if (cap->args[0]) 6314 break; 6315 6316 mutex_lock(&kvm->lock); 6317 if (!kvm->created_vcpus) { 6318 kvm->arch.disable_nx_huge_pages = true; 6319 r = 0; 6320 } 6321 mutex_unlock(&kvm->lock); 6322 break; 6323 default: 6324 r = -EINVAL; 6325 break; 6326 } 6327 return r; 6328 } 6329 6330 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow) 6331 { 6332 struct kvm_x86_msr_filter *msr_filter; 6333 6334 msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT); 6335 if (!msr_filter) 6336 return NULL; 6337 6338 msr_filter->default_allow = default_allow; 6339 return msr_filter; 6340 } 6341 6342 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter) 6343 { 6344 u32 i; 6345 6346 if (!msr_filter) 6347 return; 6348 6349 for (i = 0; i < msr_filter->count; i++) 6350 kfree(msr_filter->ranges[i].bitmap); 6351 6352 kfree(msr_filter); 6353 } 6354 6355 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, 6356 struct kvm_msr_filter_range *user_range) 6357 { 6358 unsigned long *bitmap = NULL; 6359 size_t bitmap_size; 6360 6361 if (!user_range->nmsrs) 6362 return 0; 6363 6364 if (user_range->flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)) 6365 return -EINVAL; 6366 6367 if (!user_range->flags) 6368 return -EINVAL; 6369 6370 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); 6371 if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE) 6372 return -EINVAL; 6373 6374 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); 6375 if (IS_ERR(bitmap)) 6376 return PTR_ERR(bitmap); 6377 6378 msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) { 6379 .flags = user_range->flags, 6380 .base = user_range->base, 6381 .nmsrs = user_range->nmsrs, 6382 .bitmap = bitmap, 6383 }; 6384 6385 msr_filter->count++; 6386 return 0; 6387 } 6388 6389 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) 6390 { 6391 struct kvm_msr_filter __user *user_msr_filter = argp; 6392 struct kvm_x86_msr_filter *new_filter, *old_filter; 6393 struct kvm_msr_filter filter; 6394 bool default_allow; 6395 bool empty = true; 6396 int r = 0; 6397 u32 i; 6398 6399 if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) 6400 return -EFAULT; 6401 6402 if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY) 6403 return -EINVAL; 6404 6405 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) 6406 empty &= !filter.ranges[i].nmsrs; 6407 6408 default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY); 6409 if (empty && !default_allow) 6410 return -EINVAL; 6411 6412 new_filter = kvm_alloc_msr_filter(default_allow); 6413 if (!new_filter) 6414 return -ENOMEM; 6415 6416 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { 6417 r = kvm_add_msr_filter(new_filter, &filter.ranges[i]); 6418 if (r) { 6419 kvm_free_msr_filter(new_filter); 6420 return r; 6421 } 6422 } 6423 6424 mutex_lock(&kvm->lock); 6425 6426 /* The per-VM filter is protected by kvm->lock... 
*/ 6427 old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); 6428 6429 rcu_assign_pointer(kvm->arch.msr_filter, new_filter); 6430 synchronize_srcu(&kvm->srcu); 6431 6432 kvm_free_msr_filter(old_filter); 6433 6434 kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED); 6435 mutex_unlock(&kvm->lock); 6436 6437 return 0; 6438 } 6439 6440 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 6441 static int kvm_arch_suspend_notifier(struct kvm *kvm) 6442 { 6443 struct kvm_vcpu *vcpu; 6444 unsigned long i; 6445 int ret = 0; 6446 6447 mutex_lock(&kvm->lock); 6448 kvm_for_each_vcpu(i, vcpu, kvm) { 6449 if (!vcpu->arch.pv_time.active) 6450 continue; 6451 6452 ret = kvm_set_guest_paused(vcpu); 6453 if (ret) { 6454 kvm_err("Failed to pause guest VCPU%d: %d\n", 6455 vcpu->vcpu_id, ret); 6456 break; 6457 } 6458 } 6459 mutex_unlock(&kvm->lock); 6460 6461 return ret ? NOTIFY_BAD : NOTIFY_DONE; 6462 } 6463 6464 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state) 6465 { 6466 switch (state) { 6467 case PM_HIBERNATION_PREPARE: 6468 case PM_SUSPEND_PREPARE: 6469 return kvm_arch_suspend_notifier(kvm); 6470 } 6471 6472 return NOTIFY_DONE; 6473 } 6474 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 6475 6476 static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp) 6477 { 6478 struct kvm_clock_data data = { 0 }; 6479 6480 get_kvmclock(kvm, &data); 6481 if (copy_to_user(argp, &data, sizeof(data))) 6482 return -EFAULT; 6483 6484 return 0; 6485 } 6486 6487 static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp) 6488 { 6489 struct kvm_arch *ka = &kvm->arch; 6490 struct kvm_clock_data data; 6491 u64 now_raw_ns; 6492 6493 if (copy_from_user(&data, argp, sizeof(data))) 6494 return -EFAULT; 6495 6496 /* 6497 * Only KVM_CLOCK_REALTIME is used, but allow passing the 6498 * result of KVM_GET_CLOCK back to KVM_SET_CLOCK. 6499 */ 6500 if (data.flags & ~KVM_CLOCK_VALID_FLAGS) 6501 return -EINVAL; 6502 6503 kvm_hv_request_tsc_page_update(kvm); 6504 kvm_start_pvclock_update(kvm); 6505 pvclock_update_vm_gtod_copy(kvm); 6506 6507 /* 6508 * This pairs with kvm_guest_time_update(): when masterclock is 6509 * in use, we use master_kernel_ns + kvmclock_offset to set 6510 * unsigned 'system_time' so if we use get_kvmclock_ns() (which 6511 * is slightly ahead) here we risk going negative on unsigned 6512 * 'system_time' when 'data.clock' is very small. 6513 */ 6514 if (data.flags & KVM_CLOCK_REALTIME) { 6515 u64 now_real_ns = ktime_get_real_ns(); 6516 6517 /* 6518 * Avoid stepping the kvmclock backwards. 6519 */ 6520 if (now_real_ns > data.realtime) 6521 data.clock += now_real_ns - data.realtime; 6522 } 6523 6524 if (ka->use_master_clock) 6525 now_raw_ns = ka->master_kernel_ns; 6526 else 6527 now_raw_ns = get_kvmclock_base_ns(); 6528 ka->kvmclock_offset = data.clock - now_raw_ns; 6529 kvm_end_pvclock_update(kvm); 6530 return 0; 6531 } 6532 6533 long kvm_arch_vm_ioctl(struct file *filp, 6534 unsigned int ioctl, unsigned long arg) 6535 { 6536 struct kvm *kvm = filp->private_data; 6537 void __user *argp = (void __user *)arg; 6538 int r = -ENOTTY; 6539 /* 6540 * This union makes it completely explicit to gcc-3.x 6541 * that these two variables' stack usage should be 6542 * combined, not added together. 
6543 */ 6544 union { 6545 struct kvm_pit_state ps; 6546 struct kvm_pit_state2 ps2; 6547 struct kvm_pit_config pit_config; 6548 } u; 6549 6550 switch (ioctl) { 6551 case KVM_SET_TSS_ADDR: 6552 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); 6553 break; 6554 case KVM_SET_IDENTITY_MAP_ADDR: { 6555 u64 ident_addr; 6556 6557 mutex_lock(&kvm->lock); 6558 r = -EINVAL; 6559 if (kvm->created_vcpus) 6560 goto set_identity_unlock; 6561 r = -EFAULT; 6562 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr))) 6563 goto set_identity_unlock; 6564 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 6565 set_identity_unlock: 6566 mutex_unlock(&kvm->lock); 6567 break; 6568 } 6569 case KVM_SET_NR_MMU_PAGES: 6570 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); 6571 break; 6572 case KVM_GET_NR_MMU_PAGES: 6573 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); 6574 break; 6575 case KVM_CREATE_IRQCHIP: { 6576 mutex_lock(&kvm->lock); 6577 6578 r = -EEXIST; 6579 if (irqchip_in_kernel(kvm)) 6580 goto create_irqchip_unlock; 6581 6582 r = -EINVAL; 6583 if (kvm->created_vcpus) 6584 goto create_irqchip_unlock; 6585 6586 r = kvm_pic_init(kvm); 6587 if (r) 6588 goto create_irqchip_unlock; 6589 6590 r = kvm_ioapic_init(kvm); 6591 if (r) { 6592 kvm_pic_destroy(kvm); 6593 goto create_irqchip_unlock; 6594 } 6595 6596 r = kvm_setup_default_irq_routing(kvm); 6597 if (r) { 6598 kvm_ioapic_destroy(kvm); 6599 kvm_pic_destroy(kvm); 6600 goto create_irqchip_unlock; 6601 } 6602 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */ 6603 smp_wmb(); 6604 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; 6605 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6606 create_irqchip_unlock: 6607 mutex_unlock(&kvm->lock); 6608 break; 6609 } 6610 case KVM_CREATE_PIT: 6611 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; 6612 goto create_pit; 6613 case KVM_CREATE_PIT2: 6614 r = -EFAULT; 6615 if (copy_from_user(&u.pit_config, argp, 6616 sizeof(struct kvm_pit_config))) 6617 goto out; 6618 create_pit: 6619 mutex_lock(&kvm->lock); 6620 r = -EEXIST; 6621 if (kvm->arch.vpit) 6622 goto create_pit_unlock; 6623 r = -ENOMEM; 6624 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); 6625 if (kvm->arch.vpit) 6626 r = 0; 6627 create_pit_unlock: 6628 mutex_unlock(&kvm->lock); 6629 break; 6630 case KVM_GET_IRQCHIP: { 6631 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6632 struct kvm_irqchip *chip; 6633 6634 chip = memdup_user(argp, sizeof(*chip)); 6635 if (IS_ERR(chip)) { 6636 r = PTR_ERR(chip); 6637 goto out; 6638 } 6639 6640 r = -ENXIO; 6641 if (!irqchip_kernel(kvm)) 6642 goto get_irqchip_out; 6643 r = kvm_vm_ioctl_get_irqchip(kvm, chip); 6644 if (r) 6645 goto get_irqchip_out; 6646 r = -EFAULT; 6647 if (copy_to_user(argp, chip, sizeof(*chip))) 6648 goto get_irqchip_out; 6649 r = 0; 6650 get_irqchip_out: 6651 kfree(chip); 6652 break; 6653 } 6654 case KVM_SET_IRQCHIP: { 6655 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6656 struct kvm_irqchip *chip; 6657 6658 chip = memdup_user(argp, sizeof(*chip)); 6659 if (IS_ERR(chip)) { 6660 r = PTR_ERR(chip); 6661 goto out; 6662 } 6663 6664 r = -ENXIO; 6665 if (!irqchip_kernel(kvm)) 6666 goto set_irqchip_out; 6667 r = kvm_vm_ioctl_set_irqchip(kvm, chip); 6668 set_irqchip_out: 6669 kfree(chip); 6670 break; 6671 } 6672 case KVM_GET_PIT: { 6673 r = -EFAULT; 6674 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) 6675 goto out; 6676 r = -ENXIO; 6677 if (!kvm->arch.vpit) 6678 goto out; 6679 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); 6680 if (r) 6681 goto out; 6682 r = -EFAULT; 6683 if (copy_to_user(argp, &u.ps, 
sizeof(struct kvm_pit_state))) 6684 goto out; 6685 r = 0; 6686 break; 6687 } 6688 case KVM_SET_PIT: { 6689 r = -EFAULT; 6690 if (copy_from_user(&u.ps, argp, sizeof(u.ps))) 6691 goto out; 6692 mutex_lock(&kvm->lock); 6693 r = -ENXIO; 6694 if (!kvm->arch.vpit) 6695 goto set_pit_out; 6696 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); 6697 set_pit_out: 6698 mutex_unlock(&kvm->lock); 6699 break; 6700 } 6701 case KVM_GET_PIT2: { 6702 r = -ENXIO; 6703 if (!kvm->arch.vpit) 6704 goto out; 6705 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); 6706 if (r) 6707 goto out; 6708 r = -EFAULT; 6709 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) 6710 goto out; 6711 r = 0; 6712 break; 6713 } 6714 case KVM_SET_PIT2: { 6715 r = -EFAULT; 6716 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) 6717 goto out; 6718 mutex_lock(&kvm->lock); 6719 r = -ENXIO; 6720 if (!kvm->arch.vpit) 6721 goto set_pit2_out; 6722 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); 6723 set_pit2_out: 6724 mutex_unlock(&kvm->lock); 6725 break; 6726 } 6727 case KVM_REINJECT_CONTROL: { 6728 struct kvm_reinject_control control; 6729 r = -EFAULT; 6730 if (copy_from_user(&control, argp, sizeof(control))) 6731 goto out; 6732 r = -ENXIO; 6733 if (!kvm->arch.vpit) 6734 goto out; 6735 r = kvm_vm_ioctl_reinject(kvm, &control); 6736 break; 6737 } 6738 case KVM_SET_BOOT_CPU_ID: 6739 r = 0; 6740 mutex_lock(&kvm->lock); 6741 if (kvm->created_vcpus) 6742 r = -EBUSY; 6743 else 6744 kvm->arch.bsp_vcpu_id = arg; 6745 mutex_unlock(&kvm->lock); 6746 break; 6747 #ifdef CONFIG_KVM_XEN 6748 case KVM_XEN_HVM_CONFIG: { 6749 struct kvm_xen_hvm_config xhc; 6750 r = -EFAULT; 6751 if (copy_from_user(&xhc, argp, sizeof(xhc))) 6752 goto out; 6753 r = kvm_xen_hvm_config(kvm, &xhc); 6754 break; 6755 } 6756 case KVM_XEN_HVM_GET_ATTR: { 6757 struct kvm_xen_hvm_attr xha; 6758 6759 r = -EFAULT; 6760 if (copy_from_user(&xha, argp, sizeof(xha))) 6761 goto out; 6762 r = kvm_xen_hvm_get_attr(kvm, &xha); 6763 if (!r && copy_to_user(argp, &xha, sizeof(xha))) 6764 r = -EFAULT; 6765 break; 6766 } 6767 case KVM_XEN_HVM_SET_ATTR: { 6768 struct kvm_xen_hvm_attr xha; 6769 6770 r = -EFAULT; 6771 if (copy_from_user(&xha, argp, sizeof(xha))) 6772 goto out; 6773 r = kvm_xen_hvm_set_attr(kvm, &xha); 6774 break; 6775 } 6776 case KVM_XEN_HVM_EVTCHN_SEND: { 6777 struct kvm_irq_routing_xen_evtchn uxe; 6778 6779 r = -EFAULT; 6780 if (copy_from_user(&uxe, argp, sizeof(uxe))) 6781 goto out; 6782 r = kvm_xen_hvm_evtchn_send(kvm, &uxe); 6783 break; 6784 } 6785 #endif 6786 case KVM_SET_CLOCK: 6787 r = kvm_vm_ioctl_set_clock(kvm, argp); 6788 break; 6789 case KVM_GET_CLOCK: 6790 r = kvm_vm_ioctl_get_clock(kvm, argp); 6791 break; 6792 case KVM_SET_TSC_KHZ: { 6793 u32 user_tsc_khz; 6794 6795 r = -EINVAL; 6796 user_tsc_khz = (u32)arg; 6797 6798 if (kvm_caps.has_tsc_control && 6799 user_tsc_khz >= kvm_caps.max_guest_tsc_khz) 6800 goto out; 6801 6802 if (user_tsc_khz == 0) 6803 user_tsc_khz = tsc_khz; 6804 6805 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz); 6806 r = 0; 6807 6808 goto out; 6809 } 6810 case KVM_GET_TSC_KHZ: { 6811 r = READ_ONCE(kvm->arch.default_tsc_khz); 6812 goto out; 6813 } 6814 case KVM_MEMORY_ENCRYPT_OP: { 6815 r = -ENOTTY; 6816 if (!kvm_x86_ops.mem_enc_ioctl) 6817 goto out; 6818 6819 r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp); 6820 break; 6821 } 6822 case KVM_MEMORY_ENCRYPT_REG_REGION: { 6823 struct kvm_enc_region region; 6824 6825 r = -EFAULT; 6826 if (copy_from_user(®ion, argp, sizeof(region))) 6827 goto out; 6828 6829 r = -ENOTTY; 6830 if (!kvm_x86_ops.mem_enc_register_region) 6831 goto out; 6832 
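		/*
		 * mem_enc_register_region is an optional vendor hook (currently
		 * provided by SVM for SEV guests); encrypted regions are registered
		 * so their pages can be pinned, since the hardware ties ciphertext
		 * to the physical address and the backing pages must not move.
		 *
		 * Illustrative userspace sketch (hypothetical vm_fd and guest RAM
		 * mapping, no error handling):
		 *
		 *	struct kvm_enc_region region = {
		 *		.addr = (__u64)(unsigned long)guest_mem,
		 *		.size = guest_mem_size,
		 *	};
		 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);
		 */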
6833 r = static_call(kvm_x86_mem_enc_register_region)(kvm, ®ion); 6834 break; 6835 } 6836 case KVM_MEMORY_ENCRYPT_UNREG_REGION: { 6837 struct kvm_enc_region region; 6838 6839 r = -EFAULT; 6840 if (copy_from_user(®ion, argp, sizeof(region))) 6841 goto out; 6842 6843 r = -ENOTTY; 6844 if (!kvm_x86_ops.mem_enc_unregister_region) 6845 goto out; 6846 6847 r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, ®ion); 6848 break; 6849 } 6850 case KVM_HYPERV_EVENTFD: { 6851 struct kvm_hyperv_eventfd hvevfd; 6852 6853 r = -EFAULT; 6854 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd))) 6855 goto out; 6856 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd); 6857 break; 6858 } 6859 case KVM_SET_PMU_EVENT_FILTER: 6860 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); 6861 break; 6862 case KVM_X86_SET_MSR_FILTER: 6863 r = kvm_vm_ioctl_set_msr_filter(kvm, argp); 6864 break; 6865 default: 6866 r = -ENOTTY; 6867 } 6868 out: 6869 return r; 6870 } 6871 6872 static void kvm_init_msr_list(void) 6873 { 6874 u32 dummy[2]; 6875 unsigned i; 6876 6877 BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3, 6878 "Please update the fixed PMCs in msrs_to_saved_all[]"); 6879 6880 num_msrs_to_save = 0; 6881 num_emulated_msrs = 0; 6882 num_msr_based_features = 0; 6883 6884 for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) { 6885 if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0) 6886 continue; 6887 6888 /* 6889 * Even MSRs that are valid in the host may not be exposed 6890 * to the guests in some cases. 6891 */ 6892 switch (msrs_to_save_all[i]) { 6893 case MSR_IA32_BNDCFGS: 6894 if (!kvm_mpx_supported()) 6895 continue; 6896 break; 6897 case MSR_TSC_AUX: 6898 if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) && 6899 !kvm_cpu_cap_has(X86_FEATURE_RDPID)) 6900 continue; 6901 break; 6902 case MSR_IA32_UMWAIT_CONTROL: 6903 if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) 6904 continue; 6905 break; 6906 case MSR_IA32_RTIT_CTL: 6907 case MSR_IA32_RTIT_STATUS: 6908 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) 6909 continue; 6910 break; 6911 case MSR_IA32_RTIT_CR3_MATCH: 6912 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 6913 !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering)) 6914 continue; 6915 break; 6916 case MSR_IA32_RTIT_OUTPUT_BASE: 6917 case MSR_IA32_RTIT_OUTPUT_MASK: 6918 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 6919 (!intel_pt_validate_hw_cap(PT_CAP_topa_output) && 6920 !intel_pt_validate_hw_cap(PT_CAP_single_range_output))) 6921 continue; 6922 break; 6923 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 6924 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 6925 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >= 6926 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2) 6927 continue; 6928 break; 6929 case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17: 6930 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= 6931 min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp)) 6932 continue; 6933 break; 6934 case MSR_ARCH_PERFMON_EVENTSEL0 ... 
MSR_ARCH_PERFMON_EVENTSEL0 + 17: 6935 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= 6936 min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp)) 6937 continue; 6938 break; 6939 case MSR_IA32_XFD: 6940 case MSR_IA32_XFD_ERR: 6941 if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) 6942 continue; 6943 break; 6944 default: 6945 break; 6946 } 6947 6948 msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i]; 6949 } 6950 6951 for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { 6952 if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i])) 6953 continue; 6954 6955 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; 6956 } 6957 6958 for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) { 6959 struct kvm_msr_entry msr; 6960 6961 msr.index = msr_based_features_all[i]; 6962 if (kvm_get_msr_feature(&msr)) 6963 continue; 6964 6965 msr_based_features[num_msr_based_features++] = msr_based_features_all[i]; 6966 } 6967 } 6968 6969 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, 6970 const void *v) 6971 { 6972 int handled = 0; 6973 int n; 6974 6975 do { 6976 n = min(len, 8); 6977 if (!(lapic_in_kernel(vcpu) && 6978 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) 6979 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) 6980 break; 6981 handled += n; 6982 addr += n; 6983 len -= n; 6984 v += n; 6985 } while (len); 6986 6987 return handled; 6988 } 6989 6990 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) 6991 { 6992 int handled = 0; 6993 int n; 6994 6995 do { 6996 n = min(len, 8); 6997 if (!(lapic_in_kernel(vcpu) && 6998 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, 6999 addr, n, v)) 7000 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) 7001 break; 7002 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); 7003 handled += n; 7004 addr += n; 7005 len -= n; 7006 v += n; 7007 } while (len); 7008 7009 return handled; 7010 } 7011 7012 static void kvm_set_segment(struct kvm_vcpu *vcpu, 7013 struct kvm_segment *var, int seg) 7014 { 7015 static_call(kvm_x86_set_segment)(vcpu, var, seg); 7016 } 7017 7018 void kvm_get_segment(struct kvm_vcpu *vcpu, 7019 struct kvm_segment *var, int seg) 7020 { 7021 static_call(kvm_x86_get_segment)(vcpu, var, seg); 7022 } 7023 7024 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, 7025 struct x86_exception *exception) 7026 { 7027 struct kvm_mmu *mmu = vcpu->arch.mmu; 7028 gpa_t t_gpa; 7029 7030 BUG_ON(!mmu_is_nested(vcpu)); 7031 7032 /* NPT walks are always user-walks */ 7033 access |= PFERR_USER_MASK; 7034 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception); 7035 7036 return t_gpa; 7037 } 7038 7039 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 7040 struct x86_exception *exception) 7041 { 7042 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7043 7044 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7045 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7046 } 7047 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); 7048 7049 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, 7050 struct x86_exception *exception) 7051 { 7052 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7053 7054 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? 
PFERR_USER_MASK : 0; 7055 access |= PFERR_FETCH_MASK; 7056 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7057 } 7058 7059 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 7060 struct x86_exception *exception) 7061 { 7062 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7063 7064 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7065 access |= PFERR_WRITE_MASK; 7066 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7067 } 7068 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); 7069 7070 /* uses this to access any guest's mapped memory without checking CPL */ 7071 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, 7072 struct x86_exception *exception) 7073 { 7074 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7075 7076 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); 7077 } 7078 7079 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 7080 struct kvm_vcpu *vcpu, u64 access, 7081 struct x86_exception *exception) 7082 { 7083 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7084 void *data = val; 7085 int r = X86EMUL_CONTINUE; 7086 7087 while (bytes) { 7088 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 7089 unsigned offset = addr & (PAGE_SIZE-1); 7090 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 7091 int ret; 7092 7093 if (gpa == INVALID_GPA) 7094 return X86EMUL_PROPAGATE_FAULT; 7095 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, 7096 offset, toread); 7097 if (ret < 0) { 7098 r = X86EMUL_IO_NEEDED; 7099 goto out; 7100 } 7101 7102 bytes -= toread; 7103 data += toread; 7104 addr += toread; 7105 } 7106 out: 7107 return r; 7108 } 7109 7110 /* used for instruction fetching */ 7111 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, 7112 gva_t addr, void *val, unsigned int bytes, 7113 struct x86_exception *exception) 7114 { 7115 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7116 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7117 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7118 unsigned offset; 7119 int ret; 7120 7121 /* Inline kvm_read_guest_virt_helper for speed. */ 7122 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK, 7123 exception); 7124 if (unlikely(gpa == INVALID_GPA)) 7125 return X86EMUL_PROPAGATE_FAULT; 7126 7127 offset = addr & (PAGE_SIZE-1); 7128 if (WARN_ON(offset + bytes > PAGE_SIZE)) 7129 bytes = (unsigned)PAGE_SIZE - offset; 7130 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, 7131 offset, bytes); 7132 if (unlikely(ret < 0)) 7133 return X86EMUL_IO_NEEDED; 7134 7135 return X86EMUL_CONTINUE; 7136 } 7137 7138 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, 7139 gva_t addr, void *val, unsigned int bytes, 7140 struct x86_exception *exception) 7141 { 7142 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7143 7144 /* 7145 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED 7146 * is returned, but our callers are not ready for that and they blindly 7147 * call kvm_inject_page_fault. Ensure that they at least do not leak 7148 * uninitialized kernel stack memory into cr2 and error code. 
7149 */ 7150 memset(exception, 0, sizeof(*exception)); 7151 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 7152 exception); 7153 } 7154 EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 7155 7156 static int emulator_read_std(struct x86_emulate_ctxt *ctxt, 7157 gva_t addr, void *val, unsigned int bytes, 7158 struct x86_exception *exception, bool system) 7159 { 7160 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7161 u64 access = 0; 7162 7163 if (system) 7164 access |= PFERR_IMPLICIT_ACCESS; 7165 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7166 access |= PFERR_USER_MASK; 7167 7168 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); 7169 } 7170 7171 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, 7172 unsigned long addr, void *val, unsigned int bytes) 7173 { 7174 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7175 int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes); 7176 7177 return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE; 7178 } 7179 7180 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 7181 struct kvm_vcpu *vcpu, u64 access, 7182 struct x86_exception *exception) 7183 { 7184 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7185 void *data = val; 7186 int r = X86EMUL_CONTINUE; 7187 7188 while (bytes) { 7189 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 7190 unsigned offset = addr & (PAGE_SIZE-1); 7191 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 7192 int ret; 7193 7194 if (gpa == INVALID_GPA) 7195 return X86EMUL_PROPAGATE_FAULT; 7196 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); 7197 if (ret < 0) { 7198 r = X86EMUL_IO_NEEDED; 7199 goto out; 7200 } 7201 7202 bytes -= towrite; 7203 data += towrite; 7204 addr += towrite; 7205 } 7206 out: 7207 return r; 7208 } 7209 7210 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, 7211 unsigned int bytes, struct x86_exception *exception, 7212 bool system) 7213 { 7214 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7215 u64 access = PFERR_WRITE_MASK; 7216 7217 if (system) 7218 access |= PFERR_IMPLICIT_ACCESS; 7219 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7220 access |= PFERR_USER_MASK; 7221 7222 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7223 access, exception); 7224 } 7225 7226 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, 7227 unsigned int bytes, struct x86_exception *exception) 7228 { 7229 /* kvm_write_guest_virt_system can pull in tons of pages. 
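	 * Setting l1tf_flush_l1d below causes the next VM-entry to flush the
	 * L1 data cache when the L1TF mitigation is in conditional-flush mode,
	 * since the pages touched on the guest's behalf may leave data in L1D
	 * that the guest must not be able to leak.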
*/ 7230 vcpu->arch.l1tf_flush_l1d = true; 7231 7232 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7233 PFERR_WRITE_MASK, exception); 7234 } 7235 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 7236 7237 static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type, 7238 void *insn, int insn_len) 7239 { 7240 return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type, 7241 insn, insn_len); 7242 } 7243 7244 int handle_ud(struct kvm_vcpu *vcpu) 7245 { 7246 static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX }; 7247 int emul_type = EMULTYPE_TRAP_UD; 7248 char sig[5]; /* ud2; .ascii "kvm" */ 7249 struct x86_exception e; 7250 7251 if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0))) 7252 return 1; 7253 7254 if (force_emulation_prefix && 7255 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), 7256 sig, sizeof(sig), &e) == 0 && 7257 memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) { 7258 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); 7259 emul_type = EMULTYPE_TRAP_UD_FORCED; 7260 } 7261 7262 return kvm_emulate_instruction(vcpu, emul_type); 7263 } 7264 EXPORT_SYMBOL_GPL(handle_ud); 7265 7266 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7267 gpa_t gpa, bool write) 7268 { 7269 /* For APIC access vmexit */ 7270 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7271 return 1; 7272 7273 if (vcpu_match_mmio_gpa(vcpu, gpa)) { 7274 trace_vcpu_match_mmio(gva, gpa, write, true); 7275 return 1; 7276 } 7277 7278 return 0; 7279 } 7280 7281 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7282 gpa_t *gpa, struct x86_exception *exception, 7283 bool write) 7284 { 7285 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7286 u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) 7287 | (write ? PFERR_WRITE_MASK : 0); 7288 7289 /* 7290 * currently PKRU is only applied to ept enabled guest so 7291 * there is no pkey in EPT page table for L1 guest or EPT 7292 * shadow page table for L2 guest. 
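	 *
	 * Separately, vcpu_match_mmio_gva() below consults KVM's one-entry MMIO
	 * translation cache (arch.mmio_gva/mmio_gfn/mmio_access, tagged with a
	 * memslot generation), so repeated accesses to the same emulated MMIO
	 * page can skip the full GVA->GPA page walk.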
7293 */ 7294 if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) || 7295 !permission_fault(vcpu, vcpu->arch.walk_mmu, 7296 vcpu->arch.mmio_access, 0, access))) { 7297 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | 7298 (gva & (PAGE_SIZE - 1)); 7299 trace_vcpu_match_mmio(gva, *gpa, write, false); 7300 return 1; 7301 } 7302 7303 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7304 7305 if (*gpa == INVALID_GPA) 7306 return -1; 7307 7308 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); 7309 } 7310 7311 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 7312 const void *val, int bytes) 7313 { 7314 int ret; 7315 7316 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); 7317 if (ret < 0) 7318 return 0; 7319 kvm_page_track_write(vcpu, gpa, val, bytes); 7320 return 1; 7321 } 7322 7323 struct read_write_emulator_ops { 7324 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, 7325 int bytes); 7326 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, 7327 void *val, int bytes); 7328 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 7329 int bytes, void *val); 7330 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 7331 void *val, int bytes); 7332 bool write; 7333 }; 7334 7335 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) 7336 { 7337 if (vcpu->mmio_read_completed) { 7338 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 7339 vcpu->mmio_fragments[0].gpa, val); 7340 vcpu->mmio_read_completed = 0; 7341 return 1; 7342 } 7343 7344 return 0; 7345 } 7346 7347 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 7348 void *val, int bytes) 7349 { 7350 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); 7351 } 7352 7353 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 7354 void *val, int bytes) 7355 { 7356 return emulator_write_phys(vcpu, gpa, val, bytes); 7357 } 7358 7359 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) 7360 { 7361 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val); 7362 return vcpu_mmio_write(vcpu, gpa, bytes, val); 7363 } 7364 7365 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 7366 void *val, int bytes) 7367 { 7368 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL); 7369 return X86EMUL_IO_NEEDED; 7370 } 7371 7372 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 7373 void *val, int bytes) 7374 { 7375 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 7376 7377 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 7378 return X86EMUL_CONTINUE; 7379 } 7380 7381 static const struct read_write_emulator_ops read_emultor = { 7382 .read_write_prepare = read_prepare, 7383 .read_write_emulate = read_emulate, 7384 .read_write_mmio = vcpu_mmio_read, 7385 .read_write_exit_mmio = read_exit_mmio, 7386 }; 7387 7388 static const struct read_write_emulator_ops write_emultor = { 7389 .read_write_emulate = write_emulate, 7390 .read_write_mmio = write_mmio, 7391 .read_write_exit_mmio = write_exit_mmio, 7392 .write = true, 7393 }; 7394 7395 static int emulator_read_write_onepage(unsigned long addr, void *val, 7396 unsigned int bytes, 7397 struct x86_exception *exception, 7398 struct kvm_vcpu *vcpu, 7399 const struct read_write_emulator_ops *ops) 7400 { 7401 gpa_t gpa; 7402 int handled, ret; 7403 bool write = ops->write; 7404 struct kvm_mmio_fragment *frag; 7405 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 7406 7407 /* 7408 * If the exit was due to a NPF we may already have a GPA. 
7409 * If the GPA is present, use it to avoid the GVA to GPA table walk. 7410 * Note, this cannot be used on string operations since string 7411 * operation using rep will only have the initial GPA from the NPF 7412 * occurred. 7413 */ 7414 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && 7415 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { 7416 gpa = ctxt->gpa_val; 7417 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); 7418 } else { 7419 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); 7420 if (ret < 0) 7421 return X86EMUL_PROPAGATE_FAULT; 7422 } 7423 7424 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) 7425 return X86EMUL_CONTINUE; 7426 7427 /* 7428 * Is this MMIO handled locally? 7429 */ 7430 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); 7431 if (handled == bytes) 7432 return X86EMUL_CONTINUE; 7433 7434 gpa += handled; 7435 bytes -= handled; 7436 val += handled; 7437 7438 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); 7439 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 7440 frag->gpa = gpa; 7441 frag->data = val; 7442 frag->len = bytes; 7443 return X86EMUL_CONTINUE; 7444 } 7445 7446 static int emulator_read_write(struct x86_emulate_ctxt *ctxt, 7447 unsigned long addr, 7448 void *val, unsigned int bytes, 7449 struct x86_exception *exception, 7450 const struct read_write_emulator_ops *ops) 7451 { 7452 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7453 gpa_t gpa; 7454 int rc; 7455 7456 if (ops->read_write_prepare && 7457 ops->read_write_prepare(vcpu, val, bytes)) 7458 return X86EMUL_CONTINUE; 7459 7460 vcpu->mmio_nr_fragments = 0; 7461 7462 /* Crossing a page boundary? */ 7463 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 7464 int now; 7465 7466 now = -addr & ~PAGE_MASK; 7467 rc = emulator_read_write_onepage(addr, val, now, exception, 7468 vcpu, ops); 7469 7470 if (rc != X86EMUL_CONTINUE) 7471 return rc; 7472 addr += now; 7473 if (ctxt->mode != X86EMUL_MODE_PROT64) 7474 addr = (u32)addr; 7475 val += now; 7476 bytes -= now; 7477 } 7478 7479 rc = emulator_read_write_onepage(addr, val, bytes, exception, 7480 vcpu, ops); 7481 if (rc != X86EMUL_CONTINUE) 7482 return rc; 7483 7484 if (!vcpu->mmio_nr_fragments) 7485 return rc; 7486 7487 gpa = vcpu->mmio_fragments[0].gpa; 7488 7489 vcpu->mmio_needed = 1; 7490 vcpu->mmio_cur_fragment = 0; 7491 7492 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); 7493 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 7494 vcpu->run->exit_reason = KVM_EXIT_MMIO; 7495 vcpu->run->mmio.phys_addr = gpa; 7496 7497 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 7498 } 7499 7500 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 7501 unsigned long addr, 7502 void *val, 7503 unsigned int bytes, 7504 struct x86_exception *exception) 7505 { 7506 return emulator_read_write(ctxt, addr, val, bytes, 7507 exception, &read_emultor); 7508 } 7509 7510 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, 7511 unsigned long addr, 7512 const void *val, 7513 unsigned int bytes, 7514 struct x86_exception *exception) 7515 { 7516 return emulator_read_write(ctxt, addr, (void *)val, bytes, 7517 exception, &write_emultor); 7518 } 7519 7520 #define emulator_try_cmpxchg_user(t, ptr, old, new) \ 7521 (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t)) 7522 7523 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, 7524 unsigned long addr, 7525 const void *old, 7526 const void *new, 7527 unsigned int bytes, 7528 struct 
x86_exception *exception) 7529 { 7530 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7531 u64 page_line_mask; 7532 unsigned long hva; 7533 gpa_t gpa; 7534 int r; 7535 7536 /* guests cmpxchg8b have to be emulated atomically */ 7537 if (bytes > 8 || (bytes & (bytes - 1))) 7538 goto emul_write; 7539 7540 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); 7541 7542 if (gpa == INVALID_GPA || 7543 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7544 goto emul_write; 7545 7546 /* 7547 * Emulate the atomic as a straight write to avoid #AC if SLD is 7548 * enabled in the host and the access splits a cache line. 7549 */ 7550 if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) 7551 page_line_mask = ~(cache_line_size() - 1); 7552 else 7553 page_line_mask = PAGE_MASK; 7554 7555 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) 7556 goto emul_write; 7557 7558 hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa)); 7559 if (kvm_is_error_hva(hva)) 7560 goto emul_write; 7561 7562 hva += offset_in_page(gpa); 7563 7564 switch (bytes) { 7565 case 1: 7566 r = emulator_try_cmpxchg_user(u8, hva, old, new); 7567 break; 7568 case 2: 7569 r = emulator_try_cmpxchg_user(u16, hva, old, new); 7570 break; 7571 case 4: 7572 r = emulator_try_cmpxchg_user(u32, hva, old, new); 7573 break; 7574 case 8: 7575 r = emulator_try_cmpxchg_user(u64, hva, old, new); 7576 break; 7577 default: 7578 BUG(); 7579 } 7580 7581 if (r < 0) 7582 return X86EMUL_UNHANDLEABLE; 7583 if (r) 7584 return X86EMUL_CMPXCHG_FAILED; 7585 7586 kvm_page_track_write(vcpu, gpa, new, bytes); 7587 7588 return X86EMUL_CONTINUE; 7589 7590 emul_write: 7591 printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); 7592 7593 return emulator_write_emulated(ctxt, addr, new, bytes, exception); 7594 } 7595 7596 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, 7597 unsigned short port, void *data, 7598 unsigned int count, bool in) 7599 { 7600 unsigned i; 7601 int r; 7602 7603 WARN_ON_ONCE(vcpu->arch.pio.count); 7604 for (i = 0; i < count; i++) { 7605 if (in) 7606 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data); 7607 else 7608 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data); 7609 7610 if (r) { 7611 if (i == 0) 7612 goto userspace_io; 7613 7614 /* 7615 * Userspace must have unregistered the device while PIO 7616 * was running. Drop writes / read as 0. 7617 */ 7618 if (in) 7619 memset(data, 0, size * (count - i)); 7620 break; 7621 } 7622 7623 data += size; 7624 } 7625 return 1; 7626 7627 userspace_io: 7628 vcpu->arch.pio.port = port; 7629 vcpu->arch.pio.in = in; 7630 vcpu->arch.pio.count = count; 7631 vcpu->arch.pio.size = size; 7632 7633 if (in) 7634 memset(vcpu->arch.pio_data, 0, size * count); 7635 else 7636 memcpy(vcpu->arch.pio_data, data, size * count); 7637 7638 vcpu->run->exit_reason = KVM_EXIT_IO; 7639 vcpu->run->io.direction = in ? 
KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 7640 vcpu->run->io.size = size; 7641 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 7642 vcpu->run->io.count = count; 7643 vcpu->run->io.port = port; 7644 return 0; 7645 } 7646 7647 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, 7648 unsigned short port, void *val, unsigned int count) 7649 { 7650 int r = emulator_pio_in_out(vcpu, size, port, val, count, true); 7651 if (r) 7652 trace_kvm_pio(KVM_PIO_IN, port, size, count, val); 7653 7654 return r; 7655 } 7656 7657 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val) 7658 { 7659 int size = vcpu->arch.pio.size; 7660 unsigned int count = vcpu->arch.pio.count; 7661 memcpy(val, vcpu->arch.pio_data, size * count); 7662 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); 7663 vcpu->arch.pio.count = 0; 7664 } 7665 7666 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, 7667 int size, unsigned short port, void *val, 7668 unsigned int count) 7669 { 7670 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7671 if (vcpu->arch.pio.count) { 7672 /* 7673 * Complete a previous iteration that required userspace I/O. 7674 * Note, @count isn't guaranteed to match pio.count as userspace 7675 * can modify ECX before rerunning the vCPU. Ignore any such 7676 * shenanigans as KVM doesn't support modifying the rep count, 7677 * and the emulator ensures @count doesn't overflow the buffer. 7678 */ 7679 complete_emulator_pio_in(vcpu, val); 7680 return 1; 7681 } 7682 7683 return emulator_pio_in(vcpu, size, port, val, count); 7684 } 7685 7686 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size, 7687 unsigned short port, const void *val, 7688 unsigned int count) 7689 { 7690 trace_kvm_pio(KVM_PIO_OUT, port, size, count, val); 7691 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); 7692 } 7693 7694 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, 7695 int size, unsigned short port, 7696 const void *val, unsigned int count) 7697 { 7698 return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count); 7699 } 7700 7701 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 7702 { 7703 return static_call(kvm_x86_get_segment_base)(vcpu, seg); 7704 } 7705 7706 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) 7707 { 7708 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); 7709 } 7710 7711 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) 7712 { 7713 if (!need_emulate_wbinvd(vcpu)) 7714 return X86EMUL_CONTINUE; 7715 7716 if (static_call(kvm_x86_has_wbinvd_exit)()) { 7717 int cpu = get_cpu(); 7718 7719 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 7720 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, 7721 wbinvd_ipi, NULL, 1); 7722 put_cpu(); 7723 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 7724 } else 7725 wbinvd(); 7726 return X86EMUL_CONTINUE; 7727 } 7728 7729 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) 7730 { 7731 kvm_emulate_wbinvd_noskip(vcpu); 7732 return kvm_skip_emulated_instruction(vcpu); 7733 } 7734 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); 7735 7736 7737 7738 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) 7739 { 7740 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); 7741 } 7742 7743 static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, 7744 unsigned long *dest) 7745 { 7746 kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); 7747 } 7748 7749 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, 7750 unsigned long value) 7751 { 7752 
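	/*
	 * Thin adapter from the emulator's x86_emulate_ops callback to KVM's
	 * debug-register accessor; a non-zero return from kvm_set_dr() (e.g.
	 * an attempt to set reserved DR6/DR7 bits) is propagated back to the
	 * emulator, which treats the access as faulting.
	 */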
7753 return kvm_set_dr(emul_to_vcpu(ctxt), dr, value); 7754 } 7755 7756 static u64 mk_cr_64(u64 curr_cr, u32 new_val) 7757 { 7758 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; 7759 } 7760 7761 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) 7762 { 7763 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7764 unsigned long value; 7765 7766 switch (cr) { 7767 case 0: 7768 value = kvm_read_cr0(vcpu); 7769 break; 7770 case 2: 7771 value = vcpu->arch.cr2; 7772 break; 7773 case 3: 7774 value = kvm_read_cr3(vcpu); 7775 break; 7776 case 4: 7777 value = kvm_read_cr4(vcpu); 7778 break; 7779 case 8: 7780 value = kvm_get_cr8(vcpu); 7781 break; 7782 default: 7783 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7784 return 0; 7785 } 7786 7787 return value; 7788 } 7789 7790 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) 7791 { 7792 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7793 int res = 0; 7794 7795 switch (cr) { 7796 case 0: 7797 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); 7798 break; 7799 case 2: 7800 vcpu->arch.cr2 = val; 7801 break; 7802 case 3: 7803 res = kvm_set_cr3(vcpu, val); 7804 break; 7805 case 4: 7806 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); 7807 break; 7808 case 8: 7809 res = kvm_set_cr8(vcpu, val); 7810 break; 7811 default: 7812 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7813 res = -1; 7814 } 7815 7816 return res; 7817 } 7818 7819 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) 7820 { 7821 return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt)); 7822 } 7823 7824 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7825 { 7826 static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt); 7827 } 7828 7829 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7830 { 7831 static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt); 7832 } 7833 7834 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7835 { 7836 static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt); 7837 } 7838 7839 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7840 { 7841 static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt); 7842 } 7843 7844 static unsigned long emulator_get_cached_segment_base( 7845 struct x86_emulate_ctxt *ctxt, int seg) 7846 { 7847 return get_segment_base(emul_to_vcpu(ctxt), seg); 7848 } 7849 7850 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, 7851 struct desc_struct *desc, u32 *base3, 7852 int seg) 7853 { 7854 struct kvm_segment var; 7855 7856 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); 7857 *selector = var.selector; 7858 7859 if (var.unusable) { 7860 memset(desc, 0, sizeof(*desc)); 7861 if (base3) 7862 *base3 = 0; 7863 return false; 7864 } 7865 7866 if (var.g) 7867 var.limit >>= 12; 7868 set_desc_limit(desc, var.limit); 7869 set_desc_base(desc, (unsigned long)var.base); 7870 #ifdef CONFIG_X86_64 7871 if (base3) 7872 *base3 = var.base >> 32; 7873 #endif 7874 desc->type = var.type; 7875 desc->s = var.s; 7876 desc->dpl = var.dpl; 7877 desc->p = var.present; 7878 desc->avl = var.avl; 7879 desc->l = var.l; 7880 desc->d = var.db; 7881 desc->g = var.g; 7882 7883 return true; 7884 } 7885 7886 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, 7887 struct desc_struct *desc, u32 base3, 7888 int seg) 7889 { 7890 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7891 struct kvm_segment var; 7892 7893 var.selector = selector; 7894 var.base = 
get_desc_base(desc); 7895 #ifdef CONFIG_X86_64 7896 var.base |= ((u64)base3) << 32; 7897 #endif 7898 var.limit = get_desc_limit(desc); 7899 if (desc->g) 7900 var.limit = (var.limit << 12) | 0xfff; 7901 var.type = desc->type; 7902 var.dpl = desc->dpl; 7903 var.db = desc->d; 7904 var.s = desc->s; 7905 var.l = desc->l; 7906 var.g = desc->g; 7907 var.avl = desc->avl; 7908 var.present = desc->p; 7909 var.unusable = !var.present; 7910 var.padding = 0; 7911 7912 kvm_set_segment(vcpu, &var, seg); 7913 return; 7914 } 7915 7916 static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt, 7917 u32 msr_index, u64 *pdata) 7918 { 7919 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7920 int r; 7921 7922 r = kvm_get_msr_with_filter(vcpu, msr_index, pdata); 7923 if (r < 0) 7924 return X86EMUL_UNHANDLEABLE; 7925 7926 if (r) { 7927 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, 7928 complete_emulated_rdmsr, r)) 7929 return X86EMUL_IO_NEEDED; 7930 7931 trace_kvm_msr_read_ex(msr_index); 7932 return X86EMUL_PROPAGATE_FAULT; 7933 } 7934 7935 trace_kvm_msr_read(msr_index, *pdata); 7936 return X86EMUL_CONTINUE; 7937 } 7938 7939 static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt, 7940 u32 msr_index, u64 data) 7941 { 7942 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7943 int r; 7944 7945 r = kvm_set_msr_with_filter(vcpu, msr_index, data); 7946 if (r < 0) 7947 return X86EMUL_UNHANDLEABLE; 7948 7949 if (r) { 7950 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data, 7951 complete_emulated_msr_access, r)) 7952 return X86EMUL_IO_NEEDED; 7953 7954 trace_kvm_msr_write_ex(msr_index, data); 7955 return X86EMUL_PROPAGATE_FAULT; 7956 } 7957 7958 trace_kvm_msr_write(msr_index, data); 7959 return X86EMUL_CONTINUE; 7960 } 7961 7962 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, 7963 u32 msr_index, u64 *pdata) 7964 { 7965 return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); 7966 } 7967 7968 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, 7969 u32 msr_index, u64 data) 7970 { 7971 return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data); 7972 } 7973 7974 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt) 7975 { 7976 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7977 7978 return vcpu->arch.smbase; 7979 } 7980 7981 static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase) 7982 { 7983 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7984 7985 vcpu->arch.smbase = smbase; 7986 } 7987 7988 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, 7989 u32 pmc) 7990 { 7991 if (kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc)) 7992 return 0; 7993 return -EINVAL; 7994 } 7995 7996 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, 7997 u32 pmc, u64 *pdata) 7998 { 7999 return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata); 8000 } 8001 8002 static void emulator_halt(struct x86_emulate_ctxt *ctxt) 8003 { 8004 emul_to_vcpu(ctxt)->arch.halt_request = 1; 8005 } 8006 8007 static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 8008 struct x86_instruction_info *info, 8009 enum x86_intercept_stage stage) 8010 { 8011 return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage, 8012 &ctxt->exception); 8013 } 8014 8015 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, 8016 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, 8017 bool exact_only) 8018 { 8019 return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only); 8020 } 8021 8022 static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt) 8023 { 
8024 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM); 8025 } 8026 8027 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt) 8028 { 8029 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE); 8030 } 8031 8032 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) 8033 { 8034 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR); 8035 } 8036 8037 static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt) 8038 { 8039 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID); 8040 } 8041 8042 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) 8043 { 8044 return kvm_register_read_raw(emul_to_vcpu(ctxt), reg); 8045 } 8046 8047 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) 8048 { 8049 kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val); 8050 } 8051 8052 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) 8053 { 8054 static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked); 8055 } 8056 8057 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) 8058 { 8059 return emul_to_vcpu(ctxt)->arch.hflags; 8060 } 8061 8062 static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt) 8063 { 8064 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8065 8066 kvm_smm_changed(vcpu, false); 8067 } 8068 8069 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt, 8070 const char *smstate) 8071 { 8072 return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate); 8073 } 8074 8075 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt) 8076 { 8077 kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt)); 8078 } 8079 8080 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr) 8081 { 8082 return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr); 8083 } 8084 8085 static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt) 8086 { 8087 struct kvm *kvm = emul_to_vcpu(ctxt)->kvm; 8088 8089 if (!kvm->vm_bugged) 8090 kvm_vm_bugged(kvm); 8091 } 8092 8093 static const struct x86_emulate_ops emulate_ops = { 8094 .vm_bugged = emulator_vm_bugged, 8095 .read_gpr = emulator_read_gpr, 8096 .write_gpr = emulator_write_gpr, 8097 .read_std = emulator_read_std, 8098 .write_std = emulator_write_std, 8099 .read_phys = kvm_read_guest_phys_system, 8100 .fetch = kvm_fetch_guest_virt, 8101 .read_emulated = emulator_read_emulated, 8102 .write_emulated = emulator_write_emulated, 8103 .cmpxchg_emulated = emulator_cmpxchg_emulated, 8104 .invlpg = emulator_invlpg, 8105 .pio_in_emulated = emulator_pio_in_emulated, 8106 .pio_out_emulated = emulator_pio_out_emulated, 8107 .get_segment = emulator_get_segment, 8108 .set_segment = emulator_set_segment, 8109 .get_cached_segment_base = emulator_get_cached_segment_base, 8110 .get_gdt = emulator_get_gdt, 8111 .get_idt = emulator_get_idt, 8112 .set_gdt = emulator_set_gdt, 8113 .set_idt = emulator_set_idt, 8114 .get_cr = emulator_get_cr, 8115 .set_cr = emulator_set_cr, 8116 .cpl = emulator_get_cpl, 8117 .get_dr = emulator_get_dr, 8118 .set_dr = emulator_set_dr, 8119 .get_smbase = emulator_get_smbase, 8120 .set_smbase = emulator_set_smbase, 8121 .set_msr_with_filter = emulator_set_msr_with_filter, 8122 .get_msr_with_filter = emulator_get_msr_with_filter, 8123 .set_msr = emulator_set_msr, 8124 .get_msr = emulator_get_msr, 8125 .check_pmc = emulator_check_pmc, 8126 .read_pmc = emulator_read_pmc, 8127 .halt = emulator_halt, 8128 .wbinvd = emulator_wbinvd, 8129 .fix_hypercall = emulator_fix_hypercall, 
8130 .intercept = emulator_intercept, 8131 .get_cpuid = emulator_get_cpuid, 8132 .guest_has_long_mode = emulator_guest_has_long_mode, 8133 .guest_has_movbe = emulator_guest_has_movbe, 8134 .guest_has_fxsr = emulator_guest_has_fxsr, 8135 .guest_has_rdpid = emulator_guest_has_rdpid, 8136 .set_nmi_mask = emulator_set_nmi_mask, 8137 .get_hflags = emulator_get_hflags, 8138 .exiting_smm = emulator_exiting_smm, 8139 .leave_smm = emulator_leave_smm, 8140 .triple_fault = emulator_triple_fault, 8141 .set_xcr = emulator_set_xcr, 8142 }; 8143 8144 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) 8145 { 8146 u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 8147 /* 8148 * an sti; sti; sequence only disable interrupts for the first 8149 * instruction. So, if the last instruction, be it emulated or 8150 * not, left the system with the INT_STI flag enabled, it 8151 * means that the last instruction is an sti. We should not 8152 * leave the flag on in this case. The same goes for mov ss 8153 */ 8154 if (int_shadow & mask) 8155 mask = 0; 8156 if (unlikely(int_shadow || mask)) { 8157 static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); 8158 if (!mask) 8159 kvm_make_request(KVM_REQ_EVENT, vcpu); 8160 } 8161 } 8162 8163 static bool inject_emulated_exception(struct kvm_vcpu *vcpu) 8164 { 8165 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8166 if (ctxt->exception.vector == PF_VECTOR) 8167 return kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); 8168 8169 if (ctxt->exception.error_code_valid) 8170 kvm_queue_exception_e(vcpu, ctxt->exception.vector, 8171 ctxt->exception.error_code); 8172 else 8173 kvm_queue_exception(vcpu, ctxt->exception.vector); 8174 return false; 8175 } 8176 8177 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu) 8178 { 8179 struct x86_emulate_ctxt *ctxt; 8180 8181 ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT); 8182 if (!ctxt) { 8183 pr_err("kvm: failed to allocate vcpu's emulator\n"); 8184 return NULL; 8185 } 8186 8187 ctxt->vcpu = vcpu; 8188 ctxt->ops = &emulate_ops; 8189 vcpu->arch.emulate_ctxt = ctxt; 8190 8191 return ctxt; 8192 } 8193 8194 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) 8195 { 8196 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8197 int cs_db, cs_l; 8198 8199 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); 8200 8201 ctxt->gpa_available = false; 8202 ctxt->eflags = kvm_get_rflags(vcpu); 8203 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; 8204 8205 ctxt->eip = kvm_rip_read(vcpu); 8206 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 8207 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : 8208 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : 8209 cs_db ? 
X86EMUL_MODE_PROT32 : 8210 X86EMUL_MODE_PROT16; 8211 BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); 8212 BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); 8213 BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); 8214 8215 ctxt->interruptibility = 0; 8216 ctxt->have_exception = false; 8217 ctxt->exception.vector = -1; 8218 ctxt->perm_ok = false; 8219 8220 init_decode_cache(ctxt); 8221 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 8222 } 8223 8224 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) 8225 { 8226 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8227 int ret; 8228 8229 init_emulate_ctxt(vcpu); 8230 8231 ctxt->op_bytes = 2; 8232 ctxt->ad_bytes = 2; 8233 ctxt->_eip = ctxt->eip + inc_eip; 8234 ret = emulate_int_real(ctxt, irq); 8235 8236 if (ret != X86EMUL_CONTINUE) { 8237 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 8238 } else { 8239 ctxt->eip = ctxt->_eip; 8240 kvm_rip_write(vcpu, ctxt->eip); 8241 kvm_set_rflags(vcpu, ctxt->eflags); 8242 } 8243 } 8244 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); 8245 8246 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, 8247 u8 ndata, u8 *insn_bytes, u8 insn_size) 8248 { 8249 struct kvm_run *run = vcpu->run; 8250 u64 info[5]; 8251 u8 info_start; 8252 8253 /* 8254 * Zero the whole array used to retrieve the exit info, as casting to 8255 * u32 for select entries will leave some chunks uninitialized. 8256 */ 8257 memset(&info, 0, sizeof(info)); 8258 8259 static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1], 8260 &info[2], (u32 *)&info[3], 8261 (u32 *)&info[4]); 8262 8263 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 8264 run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION; 8265 8266 /* 8267 * There's currently space for 13 entries, but 5 are used for the exit 8268 * reason and info. Restrict to 4 to reduce the maintenance burden 8269 * when expanding kvm_run.emulation_failure in the future. 8270 */ 8271 if (WARN_ON_ONCE(ndata > 4)) 8272 ndata = 4; 8273 8274 /* Always include the flags as a 'data' entry. 
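 *
 * As a rough sketch, run->internal.data[] (which aliases the
 * emulation_failure fields via the kvm_run union) ends up laid out as:
 *   data[0]               flags
 *   data[1..2]            insn_size + insn_bytes (only when insn_size != 0)
 *   data[info_start..+4]  exit reason/info from .get_exit_info()
 *   data[info_start+5..]  up to four caller-provided 'data' entries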
*/ 8275 info_start = 1; 8276 run->emulation_failure.flags = 0; 8277 8278 if (insn_size) { 8279 BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) + 8280 sizeof(run->emulation_failure.insn_bytes) != 16)); 8281 info_start += 2; 8282 run->emulation_failure.flags |= 8283 KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES; 8284 run->emulation_failure.insn_size = insn_size; 8285 memset(run->emulation_failure.insn_bytes, 0x90, 8286 sizeof(run->emulation_failure.insn_bytes)); 8287 memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size); 8288 } 8289 8290 memcpy(&run->internal.data[info_start], info, sizeof(info)); 8291 memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data, 8292 ndata * sizeof(data[0])); 8293 8294 run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata; 8295 } 8296 8297 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu) 8298 { 8299 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8300 8301 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, 8302 ctxt->fetch.end - ctxt->fetch.data); 8303 } 8304 8305 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, 8306 u8 ndata) 8307 { 8308 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0); 8309 } 8310 EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit); 8311 8312 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) 8313 { 8314 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0); 8315 } 8316 EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit); 8317 8318 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) 8319 { 8320 struct kvm *kvm = vcpu->kvm; 8321 8322 ++vcpu->stat.insn_emulation_fail; 8323 trace_kvm_emulate_insn_failed(vcpu); 8324 8325 if (emulation_type & EMULTYPE_VMWARE_GP) { 8326 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 8327 return 1; 8328 } 8329 8330 if (kvm->arch.exit_on_emulation_error || 8331 (emulation_type & EMULTYPE_SKIP)) { 8332 prepare_emulation_ctxt_failure_exit(vcpu); 8333 return 0; 8334 } 8335 8336 kvm_queue_exception(vcpu, UD_VECTOR); 8337 8338 if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { 8339 prepare_emulation_ctxt_failure_exit(vcpu); 8340 return 0; 8341 } 8342 8343 return 1; 8344 } 8345 8346 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 8347 bool write_fault_to_shadow_pgtable, 8348 int emulation_type) 8349 { 8350 gpa_t gpa = cr2_or_gpa; 8351 kvm_pfn_t pfn; 8352 8353 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 8354 return false; 8355 8356 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 8357 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 8358 return false; 8359 8360 if (!vcpu->arch.mmu->root_role.direct) { 8361 /* 8362 * Write permission should be allowed since only 8363 * write access need to be emulated. 8364 */ 8365 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 8366 8367 /* 8368 * If the mapping is invalid in guest, let cpu retry 8369 * it to generate fault. 8370 */ 8371 if (gpa == INVALID_GPA) 8372 return true; 8373 } 8374 8375 /* 8376 * Do not retry the unhandleable instruction if it faults on the 8377 * readonly host memory, otherwise it will goto a infinite loop: 8378 * retry instruction -> write #PF -> emulation fail -> retry 8379 * instruction -> ... 8380 */ 8381 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); 8382 8383 /* 8384 * If the instruction failed on the error pfn, it can not be fixed, 8385 * report the error to userspace. 
8386 */ 8387 if (is_error_noslot_pfn(pfn)) 8388 return false; 8389 8390 kvm_release_pfn_clean(pfn); 8391 8392 /* The instructions are well-emulated on direct mmu. */ 8393 if (vcpu->arch.mmu->root_role.direct) { 8394 unsigned int indirect_shadow_pages; 8395 8396 write_lock(&vcpu->kvm->mmu_lock); 8397 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; 8398 write_unlock(&vcpu->kvm->mmu_lock); 8399 8400 if (indirect_shadow_pages) 8401 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8402 8403 return true; 8404 } 8405 8406 /* 8407 * If emulation was due to an access to a shadowed page table 8408 * and it failed, try to unshadow the page and re-enter the 8409 * guest to let the CPU execute the instruction. 8410 */ 8411 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8412 8413 /* 8414 * If the access faults on its own page table, it cannot 8415 * be fixed by unprotecting the shadow page and it should 8416 * be reported to userspace. 8417 */ 8418 return !write_fault_to_shadow_pgtable; 8419 } 8420 8421 static bool retry_instruction(struct x86_emulate_ctxt *ctxt, 8422 gpa_t cr2_or_gpa, int emulation_type) 8423 { 8424 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8425 unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa; 8426 8427 last_retry_eip = vcpu->arch.last_retry_eip; 8428 last_retry_addr = vcpu->arch.last_retry_addr; 8429 8430 /* 8431 * If the emulation was caused by a #PF and the faulting instruction is 8432 * not a page-table-writing instruction, the VM-exit was caused by 8433 * shadow-page protection; we can zap the shadow page and retry the 8434 * instruction directly. 8435 * 8436 * Note: if the guest uses a non-page-table-modifying instruction 8437 * on the PDE that points to the instruction, then we will unmap 8438 * the instruction and go into an infinite loop. So we cache the 8439 * last retried eip and the last fault address; if we see the same eip 8440 * and address again, we can break out of the potential infinite 8441 * loop. 8442 */ 8443 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 8444 8445 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 8446 return false; 8447 8448 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 8449 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 8450 return false; 8451 8452 if (x86_page_table_writing_insn(ctxt)) 8453 return false; 8454 8455 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) 8456 return false; 8457 8458 vcpu->arch.last_retry_eip = ctxt->eip; 8459 vcpu->arch.last_retry_addr = cr2_or_gpa; 8460 8461 if (!vcpu->arch.mmu->root_role.direct) 8462 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 8463 8464 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8465 8466 return true; 8467 } 8468 8469 static int complete_emulated_mmio(struct kvm_vcpu *vcpu); 8470 static int complete_emulated_pio(struct kvm_vcpu *vcpu); 8471 8472 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm) 8473 { 8474 trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm); 8475 8476 if (entering_smm) { 8477 vcpu->arch.hflags |= HF_SMM_MASK; 8478 } else { 8479 vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK); 8480 8481 /* Process a latched INIT or SMI, if any.
*/ 8482 kvm_make_request(KVM_REQ_EVENT, vcpu); 8483 8484 /* 8485 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band, 8486 * on SMM exit we still need to reload them from 8487 * guest memory 8488 */ 8489 vcpu->arch.pdptrs_from_userspace = false; 8490 } 8491 8492 kvm_mmu_reset_context(vcpu); 8493 } 8494 8495 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, 8496 unsigned long *db) 8497 { 8498 u32 dr6 = 0; 8499 int i; 8500 u32 enable, rwlen; 8501 8502 enable = dr7; 8503 rwlen = dr7 >> 16; 8504 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) 8505 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) 8506 dr6 |= (1 << i); 8507 return dr6; 8508 } 8509 8510 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) 8511 { 8512 struct kvm_run *kvm_run = vcpu->run; 8513 8514 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 8515 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; 8516 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); 8517 kvm_run->debug.arch.exception = DB_VECTOR; 8518 kvm_run->exit_reason = KVM_EXIT_DEBUG; 8519 return 0; 8520 } 8521 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); 8522 return 1; 8523 } 8524 8525 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) 8526 { 8527 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 8528 int r; 8529 8530 r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); 8531 if (unlikely(!r)) 8532 return 0; 8533 8534 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 8535 8536 /* 8537 * rflags is the old, "raw" value of the flags. The new value has 8538 * not been saved yet. 8539 * 8540 * This is correct even for TF set by the guest, because "the 8541 * processor will not generate this exception after the instruction 8542 * that sets the TF flag". 8543 */ 8544 if (unlikely(rflags & X86_EFLAGS_TF)) 8545 r = kvm_vcpu_do_singlestep(vcpu); 8546 return r; 8547 } 8548 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); 8549 8550 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, 8551 int emulation_type, int *r) 8552 { 8553 WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE); 8554 8555 /* 8556 * Do not check for code breakpoints if hardware has already done the 8557 * checks, as inferred from the emulation type. On NO_DECODE and SKIP, 8558 * the instruction has passed all exception checks, and all intercepted 8559 * exceptions that trigger emulation have lower priority than code 8560 * breakpoints, i.e. the fact that the intercepted exception occurred 8561 * means any code breakpoints have already been serviced. 8562 * 8563 * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as 8564 * hardware has checked the RIP of the magic prefix, but not the RIP of 8565 * the instruction being emulated. The intent of forced emulation is 8566 * to behave as if KVM intercepted the instruction without an exception 8567 * and without a prefix. 
8568 */ 8569 if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP | 8570 EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF)) 8571 return false; 8572 8573 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && 8574 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { 8575 struct kvm_run *kvm_run = vcpu->run; 8576 unsigned long eip = kvm_get_linear_rip(vcpu); 8577 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 8578 vcpu->arch.guest_debug_dr7, 8579 vcpu->arch.eff_db); 8580 8581 if (dr6 != 0) { 8582 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; 8583 kvm_run->debug.arch.pc = eip; 8584 kvm_run->debug.arch.exception = DB_VECTOR; 8585 kvm_run->exit_reason = KVM_EXIT_DEBUG; 8586 *r = 0; 8587 return true; 8588 } 8589 } 8590 8591 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && 8592 !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { 8593 unsigned long eip = kvm_get_linear_rip(vcpu); 8594 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 8595 vcpu->arch.dr7, 8596 vcpu->arch.db); 8597 8598 if (dr6 != 0) { 8599 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); 8600 *r = 1; 8601 return true; 8602 } 8603 } 8604 8605 return false; 8606 } 8607 8608 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) 8609 { 8610 switch (ctxt->opcode_len) { 8611 case 1: 8612 switch (ctxt->b) { 8613 case 0xe4: /* IN */ 8614 case 0xe5: 8615 case 0xec: 8616 case 0xed: 8617 case 0xe6: /* OUT */ 8618 case 0xe7: 8619 case 0xee: 8620 case 0xef: 8621 case 0x6c: /* INS */ 8622 case 0x6d: 8623 case 0x6e: /* OUTS */ 8624 case 0x6f: 8625 return true; 8626 } 8627 break; 8628 case 2: 8629 switch (ctxt->b) { 8630 case 0x33: /* RDPMC */ 8631 return true; 8632 } 8633 break; 8634 } 8635 8636 return false; 8637 } 8638 8639 /* 8640 * Decode an instruction for emulation. The caller is responsible for handling 8641 * code breakpoints. Note, manually detecting code breakpoints is unnecessary 8642 * (and wrong) when emulating on an intercepted fault-like exception[*], as 8643 * code breakpoints have higher priority and thus have already been done by 8644 * hardware. 8645 * 8646 * [*] Except #MC, which is higher priority, but KVM should never emulate in 8647 * response to a machine check. 8648 */ 8649 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, 8650 void *insn, int insn_len) 8651 { 8652 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8653 int r; 8654 8655 init_emulate_ctxt(vcpu); 8656 8657 r = x86_decode_insn(ctxt, insn, insn_len, emulation_type); 8658 8659 trace_kvm_emulate_insn_start(vcpu); 8660 ++vcpu->stat.insn_emulation; 8661 8662 return r; 8663 } 8664 EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction); 8665 8666 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 8667 int emulation_type, void *insn, int insn_len) 8668 { 8669 int r; 8670 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 8671 bool writeback = true; 8672 bool write_fault_to_spt; 8673 8674 if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len))) 8675 return 1; 8676 8677 vcpu->arch.l1tf_flush_l1d = true; 8678 8679 /* 8680 * Clear write_fault_to_shadow_pgtable here to ensure it is 8681 * never reused. 8682 */ 8683 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; 8684 vcpu->arch.write_fault_to_shadow_pgtable = false; 8685 8686 if (!(emulation_type & EMULTYPE_NO_DECODE)) { 8687 kvm_clear_exception_queue(vcpu); 8688 8689 /* 8690 * Return immediately if RIP hits a code breakpoint, such #DBs 8691 * are fault-like and are higher priority than any faults on 8692 * the code fetch itself. 
8693 */ 8694 if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r)) 8695 return r; 8696 8697 r = x86_decode_emulated_instruction(vcpu, emulation_type, 8698 insn, insn_len); 8699 if (r != EMULATION_OK) { 8700 if ((emulation_type & EMULTYPE_TRAP_UD) || 8701 (emulation_type & EMULTYPE_TRAP_UD_FORCED)) { 8702 kvm_queue_exception(vcpu, UD_VECTOR); 8703 return 1; 8704 } 8705 if (reexecute_instruction(vcpu, cr2_or_gpa, 8706 write_fault_to_spt, 8707 emulation_type)) 8708 return 1; 8709 if (ctxt->have_exception) { 8710 /* 8711 * #UD should result in just EMULATION_FAILED, and trap-like 8712 * exception should not be encountered during decode. 8713 */ 8714 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || 8715 exception_type(ctxt->exception.vector) == EXCPT_TRAP); 8716 inject_emulated_exception(vcpu); 8717 return 1; 8718 } 8719 return handle_emulation_failure(vcpu, emulation_type); 8720 } 8721 } 8722 8723 if ((emulation_type & EMULTYPE_VMWARE_GP) && 8724 !is_vmware_backdoor_opcode(ctxt)) { 8725 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 8726 return 1; 8727 } 8728 8729 /* 8730 * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for 8731 * use *only* by vendor callbacks for kvm_skip_emulated_instruction(). 8732 * The caller is responsible for updating interruptibility state and 8733 * injecting single-step #DBs. 8734 */ 8735 if (emulation_type & EMULTYPE_SKIP) { 8736 if (ctxt->mode != X86EMUL_MODE_PROT64) 8737 ctxt->eip = (u32)ctxt->_eip; 8738 else 8739 ctxt->eip = ctxt->_eip; 8740 8741 if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) { 8742 r = 1; 8743 goto writeback; 8744 } 8745 8746 kvm_rip_write(vcpu, ctxt->eip); 8747 if (ctxt->eflags & X86_EFLAGS_RF) 8748 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); 8749 return 1; 8750 } 8751 8752 if (retry_instruction(ctxt, cr2_or_gpa, emulation_type)) 8753 return 1; 8754 8755 /* this is needed for vmware backdoor interface to work since it 8756 changes registers values during IO operation */ 8757 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { 8758 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 8759 emulator_invalidate_register_cache(ctxt); 8760 } 8761 8762 restart: 8763 if (emulation_type & EMULTYPE_PF) { 8764 /* Save the faulting GPA (cr2) in the address field */ 8765 ctxt->exception.address = cr2_or_gpa; 8766 8767 /* With shadow page tables, cr2 contains a GVA or nGPA. */ 8768 if (vcpu->arch.mmu->root_role.direct) { 8769 ctxt->gpa_available = true; 8770 ctxt->gpa_val = cr2_or_gpa; 8771 } 8772 } else { 8773 /* Sanitize the address out of an abundance of paranoia. */ 8774 ctxt->exception.address = 0; 8775 } 8776 8777 r = x86_emulate_insn(ctxt); 8778 8779 if (r == EMULATION_INTERCEPTED) 8780 return 1; 8781 8782 if (r == EMULATION_FAILED) { 8783 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, 8784 emulation_type)) 8785 return 1; 8786 8787 return handle_emulation_failure(vcpu, emulation_type); 8788 } 8789 8790 if (ctxt->have_exception) { 8791 r = 1; 8792 if (inject_emulated_exception(vcpu)) 8793 return r; 8794 } else if (vcpu->arch.pio.count) { 8795 if (!vcpu->arch.pio.in) { 8796 /* FIXME: return into emulator if single-stepping. 
*/ 8797 vcpu->arch.pio.count = 0; 8798 } else { 8799 writeback = false; 8800 vcpu->arch.complete_userspace_io = complete_emulated_pio; 8801 } 8802 r = 0; 8803 } else if (vcpu->mmio_needed) { 8804 ++vcpu->stat.mmio_exits; 8805 8806 if (!vcpu->mmio_is_write) 8807 writeback = false; 8808 r = 0; 8809 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 8810 } else if (vcpu->arch.complete_userspace_io) { 8811 writeback = false; 8812 r = 0; 8813 } else if (r == EMULATION_RESTART) 8814 goto restart; 8815 else 8816 r = 1; 8817 8818 writeback: 8819 if (writeback) { 8820 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 8821 toggle_interruptibility(vcpu, ctxt->interruptibility); 8822 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 8823 if (!ctxt->have_exception || 8824 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { 8825 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 8826 if (ctxt->is_branch) 8827 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 8828 kvm_rip_write(vcpu, ctxt->eip); 8829 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) 8830 r = kvm_vcpu_do_singlestep(vcpu); 8831 static_call_cond(kvm_x86_update_emulated_instruction)(vcpu); 8832 __kvm_set_rflags(vcpu, ctxt->eflags); 8833 } 8834 8835 /* 8836 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will 8837 * do nothing, and it will be requested again as soon as 8838 * the shadow expires. But we still need to check here, 8839 * because POPF has no interrupt shadow. 8840 */ 8841 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) 8842 kvm_make_request(KVM_REQ_EVENT, vcpu); 8843 } else 8844 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; 8845 8846 return r; 8847 } 8848 8849 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) 8850 { 8851 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); 8852 } 8853 EXPORT_SYMBOL_GPL(kvm_emulate_instruction); 8854 8855 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, 8856 void *insn, int insn_len) 8857 { 8858 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); 8859 } 8860 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); 8861 8862 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) 8863 { 8864 vcpu->arch.pio.count = 0; 8865 return 1; 8866 } 8867 8868 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) 8869 { 8870 vcpu->arch.pio.count = 0; 8871 8872 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) 8873 return 1; 8874 8875 return kvm_skip_emulated_instruction(vcpu); 8876 } 8877 8878 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, 8879 unsigned short port) 8880 { 8881 unsigned long val = kvm_rax_read(vcpu); 8882 int ret = emulator_pio_out(vcpu, size, port, &val, 1); 8883 8884 if (ret) 8885 return ret; 8886 8887 /* 8888 * Workaround userspace that relies on old KVM behavior of %rip being 8889 * incremented prior to exiting to userspace to handle "OUT 0x7e". 
8890 */ 8891 if (port == 0x7e && 8892 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { 8893 vcpu->arch.complete_userspace_io = 8894 complete_fast_pio_out_port_0x7e; 8895 kvm_skip_emulated_instruction(vcpu); 8896 } else { 8897 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 8898 vcpu->arch.complete_userspace_io = complete_fast_pio_out; 8899 } 8900 return 0; 8901 } 8902 8903 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) 8904 { 8905 unsigned long val; 8906 8907 /* We should only ever be called with arch.pio.count equal to 1 */ 8908 BUG_ON(vcpu->arch.pio.count != 1); 8909 8910 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { 8911 vcpu->arch.pio.count = 0; 8912 return 1; 8913 } 8914 8915 /* For size less than 4 we merge, else we zero extend */ 8916 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; 8917 8918 complete_emulator_pio_in(vcpu, &val); 8919 kvm_rax_write(vcpu, val); 8920 8921 return kvm_skip_emulated_instruction(vcpu); 8922 } 8923 8924 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, 8925 unsigned short port) 8926 { 8927 unsigned long val; 8928 int ret; 8929 8930 /* For size less than 4 we merge, else we zero extend */ 8931 val = (size < 4) ? kvm_rax_read(vcpu) : 0; 8932 8933 ret = emulator_pio_in(vcpu, size, port, &val, 1); 8934 if (ret) { 8935 kvm_rax_write(vcpu, val); 8936 return ret; 8937 } 8938 8939 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 8940 vcpu->arch.complete_userspace_io = complete_fast_pio_in; 8941 8942 return 0; 8943 } 8944 8945 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) 8946 { 8947 int ret; 8948 8949 if (in) 8950 ret = kvm_fast_pio_in(vcpu, size, port); 8951 else 8952 ret = kvm_fast_pio_out(vcpu, size, port); 8953 return ret && kvm_skip_emulated_instruction(vcpu); 8954 } 8955 EXPORT_SYMBOL_GPL(kvm_fast_pio); 8956 8957 static int kvmclock_cpu_down_prep(unsigned int cpu) 8958 { 8959 __this_cpu_write(cpu_tsc_khz, 0); 8960 return 0; 8961 } 8962 8963 static void tsc_khz_changed(void *data) 8964 { 8965 struct cpufreq_freqs *freq = data; 8966 unsigned long khz = 0; 8967 8968 if (data) 8969 khz = freq->new; 8970 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 8971 khz = cpufreq_quick_get(raw_smp_processor_id()); 8972 if (!khz) 8973 khz = tsc_khz; 8974 __this_cpu_write(cpu_tsc_khz, khz); 8975 } 8976 8977 #ifdef CONFIG_X86_64 8978 static void kvm_hyperv_tsc_notifier(void) 8979 { 8980 struct kvm *kvm; 8981 int cpu; 8982 8983 mutex_lock(&kvm_lock); 8984 list_for_each_entry(kvm, &vm_list, vm_list) 8985 kvm_make_mclock_inprogress_request(kvm); 8986 8987 /* no guest entries from this point */ 8988 hyperv_stop_tsc_emulation(); 8989 8990 /* TSC frequency always matches when on Hyper-V */ 8991 for_each_present_cpu(cpu) 8992 per_cpu(cpu_tsc_khz, cpu) = tsc_khz; 8993 kvm_caps.max_guest_tsc_khz = tsc_khz; 8994 8995 list_for_each_entry(kvm, &vm_list, vm_list) { 8996 __kvm_start_pvclock_update(kvm); 8997 pvclock_update_vm_gtod_copy(kvm); 8998 kvm_end_pvclock_update(kvm); 8999 } 9000 9001 mutex_unlock(&kvm_lock); 9002 } 9003 #endif 9004 9005 static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu) 9006 { 9007 struct kvm *kvm; 9008 struct kvm_vcpu *vcpu; 9009 int send_ipi = 0; 9010 unsigned long i; 9011 9012 /* 9013 * We allow guests to temporarily run on slowing clocks, 9014 * provided we notify them after, or to run on accelerating 9015 * clocks, provided we notify them before. Thus time never 9016 * goes backwards. 9017 * 9018 * However, we have a problem. 
We can't atomically update 9019 * the frequency of a given CPU from this function; it is 9020 * merely a notifier, which can be called from any CPU. 9021 * Changing the TSC frequency at arbitrary points in time 9022 * requires a recomputation of local variables related to 9023 * the TSC for each VCPU. We must flag these local variables 9024 * to be updated and be sure the update takes place with the 9025 * new frequency before any guests proceed. 9026 * 9027 * Unfortunately, the combination of hotplug CPU and frequency 9028 * change creates an intractable locking scenario; the order 9029 * of when these callouts happen is undefined with respect to 9030 * CPU hotplug, and they can race with each other. As such, 9031 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is 9032 * undefined; you can actually have a CPU frequency change take 9033 * place in between the computation of X and the setting of the 9034 * variable. To protect against this problem, all updates of 9035 * the per_cpu tsc_khz variable are done in an interrupt 9036 * protected IPI, and all callers wishing to update the value 9037 * must wait for a synchronous IPI to complete (which is trivial 9038 * if the caller is on the CPU already). This establishes the 9039 * necessary total order on variable updates. 9040 * 9041 * Note that because a guest time update may take place 9042 * anytime after the setting of the VCPU's request bit, the 9043 * correct TSC value must be set before the request. However, 9044 * to ensure the update actually makes it to any guest which 9045 * starts running in hardware virtualization between the set 9046 * and the acquisition of the spinlock, we must also ping the 9047 * CPU after setting the request bit. 9048 * 9049 */ 9050 9051 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 9052 9053 mutex_lock(&kvm_lock); 9054 list_for_each_entry(kvm, &vm_list, vm_list) { 9055 kvm_for_each_vcpu(i, vcpu, kvm) { 9056 if (vcpu->cpu != cpu) 9057 continue; 9058 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 9059 if (vcpu->cpu != raw_smp_processor_id()) 9060 send_ipi = 1; 9061 } 9062 } 9063 mutex_unlock(&kvm_lock); 9064 9065 if (freq->old < freq->new && send_ipi) { 9066 /* 9067 * We are scaling the frequency up. We must make sure the guest 9068 * doesn't see old kvmclock values while running with 9069 * the new frequency, otherwise we risk the guest seeing 9070 * time go backwards. 9071 * 9072 * If we are updating the frequency for another CPU 9073 * (which might be running in guest context), send an interrupt 9074 * to kick that CPU out of guest context. The next time 9075 * guest context is entered kvmclock will be updated, 9076 * so the guest will not see stale values.
9077 */ 9078 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 9079 } 9080 } 9081 9082 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 9083 void *data) 9084 { 9085 struct cpufreq_freqs *freq = data; 9086 int cpu; 9087 9088 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) 9089 return 0; 9090 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) 9091 return 0; 9092 9093 for_each_cpu(cpu, freq->policy->cpus) 9094 __kvmclock_cpufreq_notifier(freq, cpu); 9095 9096 return 0; 9097 } 9098 9099 static struct notifier_block kvmclock_cpufreq_notifier_block = { 9100 .notifier_call = kvmclock_cpufreq_notifier 9101 }; 9102 9103 static int kvmclock_cpu_online(unsigned int cpu) 9104 { 9105 tsc_khz_changed(NULL); 9106 return 0; 9107 } 9108 9109 static void kvm_timer_init(void) 9110 { 9111 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9112 max_tsc_khz = tsc_khz; 9113 9114 if (IS_ENABLED(CONFIG_CPU_FREQ)) { 9115 struct cpufreq_policy *policy; 9116 int cpu; 9117 9118 cpu = get_cpu(); 9119 policy = cpufreq_cpu_get(cpu); 9120 if (policy) { 9121 if (policy->cpuinfo.max_freq) 9122 max_tsc_khz = policy->cpuinfo.max_freq; 9123 cpufreq_cpu_put(policy); 9124 } 9125 put_cpu(); 9126 } 9127 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 9128 CPUFREQ_TRANSITION_NOTIFIER); 9129 } 9130 9131 cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online", 9132 kvmclock_cpu_online, kvmclock_cpu_down_prep); 9133 } 9134 9135 #ifdef CONFIG_X86_64 9136 static void pvclock_gtod_update_fn(struct work_struct *work) 9137 { 9138 struct kvm *kvm; 9139 struct kvm_vcpu *vcpu; 9140 unsigned long i; 9141 9142 mutex_lock(&kvm_lock); 9143 list_for_each_entry(kvm, &vm_list, vm_list) 9144 kvm_for_each_vcpu(i, vcpu, kvm) 9145 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 9146 atomic_set(&kvm_guest_has_master_clock, 0); 9147 mutex_unlock(&kvm_lock); 9148 } 9149 9150 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); 9151 9152 /* 9153 * Indirection to move queue_work() out of the tk_core.seq write held 9154 * region to prevent possible deadlocks against time accessors which 9155 * are invoked with work related locks held. 9156 */ 9157 static void pvclock_irq_work_fn(struct irq_work *w) 9158 { 9159 queue_work(system_long_wq, &pvclock_gtod_work); 9160 } 9161 9162 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn); 9163 9164 /* 9165 * Notification about pvclock gtod data update. 9166 */ 9167 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, 9168 void *priv) 9169 { 9170 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 9171 struct timekeeper *tk = priv; 9172 9173 update_pvclock_gtod(tk); 9174 9175 /* 9176 * Disable master clock if host does not trust, or does not use, 9177 * TSC based clocksource. Delegate queue_work() to irq_work as 9178 * this is invoked with tk_core.seq write held. 
9179 */ 9180 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && 9181 atomic_read(&kvm_guest_has_master_clock) != 0) 9182 irq_work_queue(&pvclock_irq_work); 9183 return 0; 9184 } 9185 9186 static struct notifier_block pvclock_gtod_notifier = { 9187 .notifier_call = pvclock_gtod_notify, 9188 }; 9189 #endif 9190 9191 int kvm_arch_init(void *opaque) 9192 { 9193 struct kvm_x86_init_ops *ops = opaque; 9194 u64 host_pat; 9195 int r; 9196 9197 if (kvm_x86_ops.hardware_enable) { 9198 pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name); 9199 return -EEXIST; 9200 } 9201 9202 if (!ops->cpu_has_kvm_support()) { 9203 pr_err_ratelimited("kvm: no hardware support for '%s'\n", 9204 ops->runtime_ops->name); 9205 return -EOPNOTSUPP; 9206 } 9207 if (ops->disabled_by_bios()) { 9208 pr_err_ratelimited("kvm: support for '%s' disabled by bios\n", 9209 ops->runtime_ops->name); 9210 return -EOPNOTSUPP; 9211 } 9212 9213 /* 9214 * KVM explicitly assumes that the guest has an FPU and 9215 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the 9216 * vCPU's FPU state as a fxregs_state struct. 9217 */ 9218 if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) { 9219 printk(KERN_ERR "kvm: inadequate fpu\n"); 9220 return -EOPNOTSUPP; 9221 } 9222 9223 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9224 pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n"); 9225 return -EOPNOTSUPP; 9226 } 9227 9228 /* 9229 * KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes 9230 * the PAT bits in SPTEs. Bail if PAT[0] is programmed to something 9231 * other than WB. Note, EPT doesn't utilize the PAT, but don't bother 9232 * with an exception. PAT[0] is set to WB on RESET and also by the 9233 * kernel, i.e. failure indicates a kernel bug or broken firmware. 
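 *
 * (For reference: PAT entry 0 occupies bits 2:0 of MSR_IA32_CR_PAT and the
 * architectural encoding for WB is 0x06, hence the GENMASK(2, 0) check
 * against 6 below.)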
9234 */ 9235 if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) || 9236 (host_pat & GENMASK(2, 0)) != 6) { 9237 pr_err("kvm: host PAT[0] is not WB\n"); 9238 return -EIO; 9239 } 9240 9241 x86_emulator_cache = kvm_alloc_emulator_cache(); 9242 if (!x86_emulator_cache) { 9243 pr_err("kvm: failed to allocate cache for x86 emulator\n"); 9244 return -ENOMEM; 9245 } 9246 9247 user_return_msrs = alloc_percpu(struct kvm_user_return_msrs); 9248 if (!user_return_msrs) { 9249 printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n"); 9250 r = -ENOMEM; 9251 goto out_free_x86_emulator_cache; 9252 } 9253 kvm_nr_uret_msrs = 0; 9254 9255 r = kvm_mmu_vendor_module_init(); 9256 if (r) 9257 goto out_free_percpu; 9258 9259 kvm_timer_init(); 9260 9261 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 9262 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 9263 kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0; 9264 } 9265 9266 if (pi_inject_timer == -1) 9267 pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER); 9268 #ifdef CONFIG_X86_64 9269 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); 9270 9271 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9272 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier); 9273 #endif 9274 9275 return 0; 9276 9277 out_free_percpu: 9278 free_percpu(user_return_msrs); 9279 out_free_x86_emulator_cache: 9280 kmem_cache_destroy(x86_emulator_cache); 9281 return r; 9282 } 9283 9284 void kvm_arch_exit(void) 9285 { 9286 #ifdef CONFIG_X86_64 9287 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9288 clear_hv_tscchange_cb(); 9289 #endif 9290 kvm_lapic_exit(); 9291 9292 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 9293 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, 9294 CPUFREQ_TRANSITION_NOTIFIER); 9295 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); 9296 #ifdef CONFIG_X86_64 9297 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); 9298 irq_work_sync(&pvclock_irq_work); 9299 cancel_work_sync(&pvclock_gtod_work); 9300 #endif 9301 kvm_x86_ops.hardware_enable = NULL; 9302 kvm_mmu_vendor_module_exit(); 9303 free_percpu(user_return_msrs); 9304 kmem_cache_destroy(x86_emulator_cache); 9305 #ifdef CONFIG_KVM_XEN 9306 static_key_deferred_flush(&kvm_xen_enabled); 9307 WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); 9308 #endif 9309 } 9310 9311 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) 9312 { 9313 /* 9314 * The vCPU has halted, e.g. executed HLT. Update the run state if the 9315 * local APIC is in-kernel, the run loop will detect the non-runnable 9316 * state and halt the vCPU. Exit to userspace if the local APIC is 9317 * managed by userspace, in which case userspace is responsible for 9318 * handling wake events. 9319 */ 9320 ++vcpu->stat.halt_exits; 9321 if (lapic_in_kernel(vcpu)) { 9322 vcpu->arch.mp_state = state; 9323 return 1; 9324 } else { 9325 vcpu->run->exit_reason = reason; 9326 return 0; 9327 } 9328 } 9329 9330 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu) 9331 { 9332 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); 9333 } 9334 EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip); 9335 9336 int kvm_emulate_halt(struct kvm_vcpu *vcpu) 9337 { 9338 int ret = kvm_skip_emulated_instruction(vcpu); 9339 /* 9340 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered 9341 * KVM_EXIT_DEBUG here. 
9342 */ 9343 return kvm_emulate_halt_noskip(vcpu) && ret; 9344 } 9345 EXPORT_SYMBOL_GPL(kvm_emulate_halt); 9346 9347 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) 9348 { 9349 int ret = kvm_skip_emulated_instruction(vcpu); 9350 9351 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, 9352 KVM_EXIT_AP_RESET_HOLD) && ret; 9353 } 9354 EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold); 9355 9356 #ifdef CONFIG_X86_64 9357 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, 9358 unsigned long clock_type) 9359 { 9360 struct kvm_clock_pairing clock_pairing; 9361 struct timespec64 ts; 9362 u64 cycle; 9363 int ret; 9364 9365 if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK) 9366 return -KVM_EOPNOTSUPP; 9367 9368 /* 9369 * When the TSC is in permanent catchup mode, guests won't be able to 9370 * use the pvclock_read_retry loop to get a consistent view of pvclock. 9371 */ 9372 if (vcpu->arch.tsc_always_catchup) 9373 return -KVM_EOPNOTSUPP; 9374 9375 if (!kvm_get_walltime_and_clockread(&ts, &cycle)) 9376 return -KVM_EOPNOTSUPP; 9377 9378 clock_pairing.sec = ts.tv_sec; 9379 clock_pairing.nsec = ts.tv_nsec; 9380 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); 9381 clock_pairing.flags = 0; 9382 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad)); 9383 9384 ret = 0; 9385 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, 9386 sizeof(struct kvm_clock_pairing))) 9387 ret = -KVM_EFAULT; 9388 9389 return ret; 9390 } 9391 #endif 9392 9393 /* 9394 * kvm_pv_kick_cpu_op: Kick a vcpu. 9395 * 9396 * @apicid: APIC ID of the vCPU to be kicked. 9397 */ 9398 static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid) 9399 { 9400 /* 9401 * All other fields are unused for APIC_DM_REMRD, but may be consumed by 9402 * common code, e.g. for tracing. Defer initialization to the compiler.
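 *
 * (The designated initializer below implicitly zero-initializes every field
 * that is not explicitly named, so the "unused" fields still hold
 * well-defined values when common code reads them.)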
9403 */ 9404 struct kvm_lapic_irq lapic_irq = { 9405 .delivery_mode = APIC_DM_REMRD, 9406 .dest_mode = APIC_DEST_PHYSICAL, 9407 .shorthand = APIC_DEST_NOSHORT, 9408 .dest_id = apicid, 9409 }; 9410 9411 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); 9412 } 9413 9414 bool kvm_apicv_activated(struct kvm *kvm) 9415 { 9416 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); 9417 } 9418 EXPORT_SYMBOL_GPL(kvm_apicv_activated); 9419 9420 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu) 9421 { 9422 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons); 9423 ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu); 9424 9425 return (vm_reasons | vcpu_reasons) == 0; 9426 } 9427 EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated); 9428 9429 static void set_or_clear_apicv_inhibit(unsigned long *inhibits, 9430 enum kvm_apicv_inhibit reason, bool set) 9431 { 9432 if (set) 9433 __set_bit(reason, inhibits); 9434 else 9435 __clear_bit(reason, inhibits); 9436 9437 trace_kvm_apicv_inhibit_changed(reason, set, *inhibits); 9438 } 9439 9440 static void kvm_apicv_init(struct kvm *kvm) 9441 { 9442 unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons; 9443 9444 init_rwsem(&kvm->arch.apicv_update_lock); 9445 9446 set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true); 9447 9448 if (!enable_apicv) 9449 set_or_clear_apicv_inhibit(inhibits, 9450 APICV_INHIBIT_REASON_DISABLE, true); 9451 } 9452 9453 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) 9454 { 9455 struct kvm_vcpu *target = NULL; 9456 struct kvm_apic_map *map; 9457 9458 vcpu->stat.directed_yield_attempted++; 9459 9460 if (single_task_running()) 9461 goto no_yield; 9462 9463 rcu_read_lock(); 9464 map = rcu_dereference(vcpu->kvm->arch.apic_map); 9465 9466 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) 9467 target = map->phys_map[dest_id]->vcpu; 9468 9469 rcu_read_unlock(); 9470 9471 if (!target || !READ_ONCE(target->ready)) 9472 goto no_yield; 9473 9474 /* Ignore requests to yield to self */ 9475 if (vcpu == target) 9476 goto no_yield; 9477 9478 if (kvm_vcpu_yield_to(target) <= 0) 9479 goto no_yield; 9480 9481 vcpu->stat.directed_yield_successful++; 9482 9483 no_yield: 9484 return; 9485 } 9486 9487 static int complete_hypercall_exit(struct kvm_vcpu *vcpu) 9488 { 9489 u64 ret = vcpu->run->hypercall.ret; 9490 9491 if (!is_64_bit_mode(vcpu)) 9492 ret = (u32)ret; 9493 kvm_rax_write(vcpu, ret); 9494 ++vcpu->stat.hypercalls; 9495 return kvm_skip_emulated_instruction(vcpu); 9496 } 9497 9498 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 9499 { 9500 unsigned long nr, a0, a1, a2, a3, ret; 9501 int op_64_bit; 9502 9503 if (kvm_xen_hypercall_enabled(vcpu->kvm)) 9504 return kvm_xen_hypercall(vcpu); 9505 9506 if (kvm_hv_hypercall_enabled(vcpu)) 9507 return kvm_hv_hypercall(vcpu); 9508 9509 nr = kvm_rax_read(vcpu); 9510 a0 = kvm_rbx_read(vcpu); 9511 a1 = kvm_rcx_read(vcpu); 9512 a2 = kvm_rdx_read(vcpu); 9513 a3 = kvm_rsi_read(vcpu); 9514 9515 trace_kvm_hypercall(nr, a0, a1, a2, a3); 9516 9517 op_64_bit = is_64_bit_hypercall(vcpu); 9518 if (!op_64_bit) { 9519 nr &= 0xFFFFFFFF; 9520 a0 &= 0xFFFFFFFF; 9521 a1 &= 0xFFFFFFFF; 9522 a2 &= 0xFFFFFFFF; 9523 a3 &= 0xFFFFFFFF; 9524 } 9525 9526 if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { 9527 ret = -KVM_EPERM; 9528 goto out; 9529 } 9530 9531 ret = -KVM_ENOSYS; 9532 9533 switch (nr) { 9534 case KVM_HC_VAPIC_POLL_IRQ: 9535 ret = 0; 9536 break; 9537 case KVM_HC_KICK_CPU: 9538 if (!guest_pv_has(vcpu, 
KVM_FEATURE_PV_UNHALT)) 9539 break; 9540 9541 kvm_pv_kick_cpu_op(vcpu->kvm, a1); 9542 kvm_sched_yield(vcpu, a1); 9543 ret = 0; 9544 break; 9545 #ifdef CONFIG_X86_64 9546 case KVM_HC_CLOCK_PAIRING: 9547 ret = kvm_pv_clock_pairing(vcpu, a0, a1); 9548 break; 9549 #endif 9550 case KVM_HC_SEND_IPI: 9551 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI)) 9552 break; 9553 9554 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); 9555 break; 9556 case KVM_HC_SCHED_YIELD: 9557 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) 9558 break; 9559 9560 kvm_sched_yield(vcpu, a0); 9561 ret = 0; 9562 break; 9563 case KVM_HC_MAP_GPA_RANGE: { 9564 u64 gpa = a0, npages = a1, attrs = a2; 9565 9566 ret = -KVM_ENOSYS; 9567 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) 9568 break; 9569 9570 if (!PAGE_ALIGNED(gpa) || !npages || 9571 gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) { 9572 ret = -KVM_EINVAL; 9573 break; 9574 } 9575 9576 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; 9577 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; 9578 vcpu->run->hypercall.args[0] = gpa; 9579 vcpu->run->hypercall.args[1] = npages; 9580 vcpu->run->hypercall.args[2] = attrs; 9581 vcpu->run->hypercall.longmode = op_64_bit; 9582 vcpu->arch.complete_userspace_io = complete_hypercall_exit; 9583 return 0; 9584 } 9585 default: 9586 ret = -KVM_ENOSYS; 9587 break; 9588 } 9589 out: 9590 if (!op_64_bit) 9591 ret = (u32)ret; 9592 kvm_rax_write(vcpu, ret); 9593 9594 ++vcpu->stat.hypercalls; 9595 return kvm_skip_emulated_instruction(vcpu); 9596 } 9597 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 9598 9599 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) 9600 { 9601 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 9602 char instruction[3]; 9603 unsigned long rip = kvm_rip_read(vcpu); 9604 9605 /* 9606 * If the quirk is disabled, synthesize a #UD and let the guest pick up 9607 * the pieces. 9608 */ 9609 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { 9610 ctxt->exception.error_code_valid = false; 9611 ctxt->exception.vector = UD_VECTOR; 9612 ctxt->have_exception = true; 9613 return X86EMUL_PROPAGATE_FAULT; 9614 } 9615 9616 static_call(kvm_x86_patch_hypercall)(vcpu, instruction); 9617 9618 return emulator_write_emulated(ctxt, rip, instruction, 3, 9619 &ctxt->exception); 9620 } 9621 9622 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 9623 { 9624 return vcpu->run->request_interrupt_window && 9625 likely(!pic_in_kernel(vcpu->kvm)); 9626 } 9627 9628 /* Called within kvm->srcu read side. 
*/ 9629 static void post_kvm_run_save(struct kvm_vcpu *vcpu) 9630 { 9631 struct kvm_run *kvm_run = vcpu->run; 9632 9633 kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu); 9634 kvm_run->cr8 = kvm_get_cr8(vcpu); 9635 kvm_run->apic_base = kvm_get_apic_base(vcpu); 9636 9637 kvm_run->ready_for_interrupt_injection = 9638 pic_in_kernel(vcpu->kvm) || 9639 kvm_vcpu_ready_for_interrupt_injection(vcpu); 9640 9641 if (is_smm(vcpu)) 9642 kvm_run->flags |= KVM_RUN_X86_SMM; 9643 } 9644 9645 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 9646 { 9647 int max_irr, tpr; 9648 9649 if (!kvm_x86_ops.update_cr8_intercept) 9650 return; 9651 9652 if (!lapic_in_kernel(vcpu)) 9653 return; 9654 9655 if (vcpu->arch.apic->apicv_active) 9656 return; 9657 9658 if (!vcpu->arch.apic->vapic_addr) 9659 max_irr = kvm_lapic_find_highest_irr(vcpu); 9660 else 9661 max_irr = -1; 9662 9663 if (max_irr != -1) 9664 max_irr >>= 4; 9665 9666 tpr = kvm_lapic_get_cr8(vcpu); 9667 9668 static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); 9669 } 9670 9671 9672 int kvm_check_nested_events(struct kvm_vcpu *vcpu) 9673 { 9674 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 9675 kvm_x86_ops.nested_ops->triple_fault(vcpu); 9676 return 1; 9677 } 9678 9679 return kvm_x86_ops.nested_ops->check_events(vcpu); 9680 } 9681 9682 static void kvm_inject_exception(struct kvm_vcpu *vcpu) 9683 { 9684 trace_kvm_inj_exception(vcpu->arch.exception.nr, 9685 vcpu->arch.exception.has_error_code, 9686 vcpu->arch.exception.error_code, 9687 vcpu->arch.exception.injected); 9688 9689 if (vcpu->arch.exception.has_error_code && !is_protmode(vcpu)) 9690 vcpu->arch.exception.has_error_code = false; 9691 static_call(kvm_x86_queue_exception)(vcpu); 9692 } 9693 9694 static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) 9695 { 9696 int r; 9697 bool can_inject = true; 9698 9699 /* try to reinject previous events if any */ 9700 9701 if (vcpu->arch.exception.injected) { 9702 kvm_inject_exception(vcpu); 9703 can_inject = false; 9704 } 9705 /* 9706 * Do not inject an NMI or interrupt if there is a pending 9707 * exception. Exceptions and interrupts are recognized at 9708 * instruction boundaries, i.e. the start of an instruction. 9709 * Trap-like exceptions, e.g. #DB, have higher priority than 9710 * NMIs and interrupts, i.e. traps are recognized before an 9711 * NMI/interrupt that's pending on the same instruction. 9712 * Fault-like exceptions, e.g. #GP and #PF, are the lowest 9713 * priority, but are only generated (pended) during instruction 9714 * execution, i.e. a pending fault-like exception means the 9715 * fault occurred on the *previous* instruction and must be 9716 * serviced prior to recognizing any new events in order to 9717 * fully complete the previous instruction. 9718 */ 9719 else if (!vcpu->arch.exception.pending) { 9720 if (vcpu->arch.nmi_injected) { 9721 static_call(kvm_x86_inject_nmi)(vcpu); 9722 can_inject = false; 9723 } else if (vcpu->arch.interrupt.injected) { 9724 static_call(kvm_x86_inject_irq)(vcpu, true); 9725 can_inject = false; 9726 } 9727 } 9728 9729 WARN_ON_ONCE(vcpu->arch.exception.injected && 9730 vcpu->arch.exception.pending); 9731 9732 /* 9733 * Call check_nested_events() even if we reinjected a previous event 9734 * in order for the caller to determine if it should request an 9735 * immediate exit when pending L1 events require an exit from 9736 * L2 to L1.
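* (for example, an interrupt that has arrived for L1 while L2 is * running and must be delivered as a nested VM-Exit).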
9737 */ 9738 if (is_guest_mode(vcpu)) { 9739 r = kvm_check_nested_events(vcpu); 9740 if (r < 0) 9741 goto out; 9742 } 9743 9744 /* try to inject new event if pending */ 9745 if (vcpu->arch.exception.pending) { 9746 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) 9747 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | 9748 X86_EFLAGS_RF); 9749 9750 if (vcpu->arch.exception.nr == DB_VECTOR) { 9751 kvm_deliver_exception_payload(vcpu); 9752 if (vcpu->arch.dr7 & DR7_GD) { 9753 vcpu->arch.dr7 &= ~DR7_GD; 9754 kvm_update_dr7(vcpu); 9755 } 9756 } 9757 9758 kvm_inject_exception(vcpu); 9759 9760 vcpu->arch.exception.pending = false; 9761 vcpu->arch.exception.injected = true; 9762 9763 can_inject = false; 9764 } 9765 9766 /* Don't inject interrupts if the user asked to avoid doing so */ 9767 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) 9768 return 0; 9769 9770 /* 9771 * Finally, inject interrupt events. If an event cannot be injected 9772 * due to architectural conditions (e.g. IF=0) a window-open exit 9773 * will re-request KVM_REQ_EVENT. Sometimes however an event is pending 9774 * and can architecturally be injected, but we cannot do it right now: 9775 * an interrupt could have arrived just now and we have to inject it 9776 * as a vmexit, or there could already an event in the queue, which is 9777 * indicated by can_inject. In that case we request an immediate exit 9778 * in order to make progress and get back here for another iteration. 9779 * The kvm_x86_ops hooks communicate this by returning -EBUSY. 9780 */ 9781 if (vcpu->arch.smi_pending) { 9782 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; 9783 if (r < 0) 9784 goto out; 9785 if (r) { 9786 vcpu->arch.smi_pending = false; 9787 ++vcpu->arch.smi_count; 9788 enter_smm(vcpu); 9789 can_inject = false; 9790 } else 9791 static_call(kvm_x86_enable_smi_window)(vcpu); 9792 } 9793 9794 if (vcpu->arch.nmi_pending) { 9795 r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; 9796 if (r < 0) 9797 goto out; 9798 if (r) { 9799 --vcpu->arch.nmi_pending; 9800 vcpu->arch.nmi_injected = true; 9801 static_call(kvm_x86_inject_nmi)(vcpu); 9802 can_inject = false; 9803 WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); 9804 } 9805 if (vcpu->arch.nmi_pending) 9806 static_call(kvm_x86_enable_nmi_window)(vcpu); 9807 } 9808 9809 if (kvm_cpu_has_injectable_intr(vcpu)) { 9810 r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; 9811 if (r < 0) 9812 goto out; 9813 if (r) { 9814 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); 9815 static_call(kvm_x86_inject_irq)(vcpu, false); 9816 WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); 9817 } 9818 if (kvm_cpu_has_injectable_intr(vcpu)) 9819 static_call(kvm_x86_enable_irq_window)(vcpu); 9820 } 9821 9822 if (is_guest_mode(vcpu) && 9823 kvm_x86_ops.nested_ops->hv_timer_pending && 9824 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) 9825 *req_immediate_exit = true; 9826 9827 WARN_ON(vcpu->arch.exception.pending); 9828 return 0; 9829 9830 out: 9831 if (r == -EBUSY) { 9832 *req_immediate_exit = true; 9833 r = 0; 9834 } 9835 return r; 9836 } 9837 9838 static void process_nmi(struct kvm_vcpu *vcpu) 9839 { 9840 unsigned limit = 2; 9841 9842 /* 9843 * x86 is limited to one NMI running, and one NMI pending after it. 9844 * If an NMI is already in progress, limit further NMIs to just one. 9845 * Otherwise, allow two (and we'll inject the first one immediately). 
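* For example: with no NMI in flight, two NMIs latched via nmi_queued * leave nmi_pending == 2 (the first is injected right away, the second * stays pending); if an NMI is already being handled or injected, the * min() below collapses everything beyond a single pending NMI.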
9846 */ 9847 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) 9848 limit = 1; 9849 9850 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); 9851 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); 9852 kvm_make_request(KVM_REQ_EVENT, vcpu); 9853 } 9854 9855 static u32 enter_smm_get_segment_flags(struct kvm_segment *seg) 9856 { 9857 u32 flags = 0; 9858 flags |= seg->g << 23; 9859 flags |= seg->db << 22; 9860 flags |= seg->l << 21; 9861 flags |= seg->avl << 20; 9862 flags |= seg->present << 15; 9863 flags |= seg->dpl << 13; 9864 flags |= seg->s << 12; 9865 flags |= seg->type << 8; 9866 return flags; 9867 } 9868 9869 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n) 9870 { 9871 struct kvm_segment seg; 9872 int offset; 9873 9874 kvm_get_segment(vcpu, &seg, n); 9875 put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector); 9876 9877 if (n < 3) 9878 offset = 0x7f84 + n * 12; 9879 else 9880 offset = 0x7f2c + (n - 3) * 12; 9881 9882 put_smstate(u32, buf, offset + 8, seg.base); 9883 put_smstate(u32, buf, offset + 4, seg.limit); 9884 put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg)); 9885 } 9886 9887 #ifdef CONFIG_X86_64 9888 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n) 9889 { 9890 struct kvm_segment seg; 9891 int offset; 9892 u16 flags; 9893 9894 kvm_get_segment(vcpu, &seg, n); 9895 offset = 0x7e00 + n * 16; 9896 9897 flags = enter_smm_get_segment_flags(&seg) >> 8; 9898 put_smstate(u16, buf, offset, seg.selector); 9899 put_smstate(u16, buf, offset + 2, flags); 9900 put_smstate(u32, buf, offset + 4, seg.limit); 9901 put_smstate(u64, buf, offset + 8, seg.base); 9902 } 9903 #endif 9904 9905 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) 9906 { 9907 struct desc_ptr dt; 9908 struct kvm_segment seg; 9909 unsigned long val; 9910 int i; 9911 9912 put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu)); 9913 put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu)); 9914 put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu)); 9915 put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu)); 9916 9917 for (i = 0; i < 8; i++) 9918 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i)); 9919 9920 kvm_get_dr(vcpu, 6, &val); 9921 put_smstate(u32, buf, 0x7fcc, (u32)val); 9922 kvm_get_dr(vcpu, 7, &val); 9923 put_smstate(u32, buf, 0x7fc8, (u32)val); 9924 9925 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); 9926 put_smstate(u32, buf, 0x7fc4, seg.selector); 9927 put_smstate(u32, buf, 0x7f64, seg.base); 9928 put_smstate(u32, buf, 0x7f60, seg.limit); 9929 put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg)); 9930 9931 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); 9932 put_smstate(u32, buf, 0x7fc0, seg.selector); 9933 put_smstate(u32, buf, 0x7f80, seg.base); 9934 put_smstate(u32, buf, 0x7f7c, seg.limit); 9935 put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg)); 9936 9937 static_call(kvm_x86_get_gdt)(vcpu, &dt); 9938 put_smstate(u32, buf, 0x7f74, dt.address); 9939 put_smstate(u32, buf, 0x7f70, dt.size); 9940 9941 static_call(kvm_x86_get_idt)(vcpu, &dt); 9942 put_smstate(u32, buf, 0x7f58, dt.address); 9943 put_smstate(u32, buf, 0x7f54, dt.size); 9944 9945 for (i = 0; i < 6; i++) 9946 enter_smm_save_seg_32(vcpu, buf, i); 9947 9948 put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu)); 9949 9950 /* revision id */ 9951 put_smstate(u32, buf, 0x7efc, 0x00020000); 9952 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); 9953 } 9954 9955 #ifdef CONFIG_X86_64 9956 static void 
enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) 9957 { 9958 struct desc_ptr dt; 9959 struct kvm_segment seg; 9960 unsigned long val; 9961 int i; 9962 9963 for (i = 0; i < 16; i++) 9964 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i)); 9965 9966 put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu)); 9967 put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu)); 9968 9969 kvm_get_dr(vcpu, 6, &val); 9970 put_smstate(u64, buf, 0x7f68, val); 9971 kvm_get_dr(vcpu, 7, &val); 9972 put_smstate(u64, buf, 0x7f60, val); 9973 9974 put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu)); 9975 put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu)); 9976 put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu)); 9977 9978 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); 9979 9980 /* revision id */ 9981 put_smstate(u32, buf, 0x7efc, 0x00020064); 9982 9983 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); 9984 9985 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); 9986 put_smstate(u16, buf, 0x7e90, seg.selector); 9987 put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8); 9988 put_smstate(u32, buf, 0x7e94, seg.limit); 9989 put_smstate(u64, buf, 0x7e98, seg.base); 9990 9991 static_call(kvm_x86_get_idt)(vcpu, &dt); 9992 put_smstate(u32, buf, 0x7e84, dt.size); 9993 put_smstate(u64, buf, 0x7e88, dt.address); 9994 9995 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); 9996 put_smstate(u16, buf, 0x7e70, seg.selector); 9997 put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8); 9998 put_smstate(u32, buf, 0x7e74, seg.limit); 9999 put_smstate(u64, buf, 0x7e78, seg.base); 10000 10001 static_call(kvm_x86_get_gdt)(vcpu, &dt); 10002 put_smstate(u32, buf, 0x7e64, dt.size); 10003 put_smstate(u64, buf, 0x7e68, dt.address); 10004 10005 for (i = 0; i < 6; i++) 10006 enter_smm_save_seg_64(vcpu, buf, i); 10007 } 10008 #endif 10009 10010 static void enter_smm(struct kvm_vcpu *vcpu) 10011 { 10012 struct kvm_segment cs, ds; 10013 struct desc_ptr dt; 10014 unsigned long cr0; 10015 char buf[512]; 10016 10017 memset(buf, 0, 512); 10018 #ifdef CONFIG_X86_64 10019 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) 10020 enter_smm_save_state_64(vcpu, buf); 10021 else 10022 #endif 10023 enter_smm_save_state_32(vcpu, buf); 10024 10025 /* 10026 * Give enter_smm() a chance to make ISA-specific changes to the vCPU 10027 * state (e.g. leave guest mode) after we've saved the state into the 10028 * SMM state-save area. 10029 */ 10030 static_call(kvm_x86_enter_smm)(vcpu, buf); 10031 10032 kvm_smm_changed(vcpu, true); 10033 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); 10034 10035 if (static_call(kvm_x86_get_nmi_mask)(vcpu)) 10036 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 10037 else 10038 static_call(kvm_x86_set_nmi_mask)(vcpu, true); 10039 10040 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 10041 kvm_rip_write(vcpu, 0x8000); 10042 10043 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); 10044 static_call(kvm_x86_set_cr0)(vcpu, cr0); 10045 vcpu->arch.cr0 = cr0; 10046 10047 static_call(kvm_x86_set_cr4)(vcpu, 0); 10048 10049 /* Undocumented: IDT limit is set to zero on entry to SMM. 
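KVM zeroes both the base and the limit below before loading the descriptor.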
*/ 10050 dt.address = dt.size = 0; 10051 static_call(kvm_x86_set_idt)(vcpu, &dt); 10052 10053 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 10054 10055 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; 10056 cs.base = vcpu->arch.smbase; 10057 10058 ds.selector = 0; 10059 ds.base = 0; 10060 10061 cs.limit = ds.limit = 0xffffffff; 10062 cs.type = ds.type = 0x3; 10063 cs.dpl = ds.dpl = 0; 10064 cs.db = ds.db = 0; 10065 cs.s = ds.s = 1; 10066 cs.l = ds.l = 0; 10067 cs.g = ds.g = 1; 10068 cs.avl = ds.avl = 0; 10069 cs.present = ds.present = 1; 10070 cs.unusable = ds.unusable = 0; 10071 cs.padding = ds.padding = 0; 10072 10073 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 10074 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); 10075 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); 10076 kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); 10077 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); 10078 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); 10079 10080 #ifdef CONFIG_X86_64 10081 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) 10082 static_call(kvm_x86_set_efer)(vcpu, 0); 10083 #endif 10084 10085 kvm_update_cpuid_runtime(vcpu); 10086 kvm_mmu_reset_context(vcpu); 10087 } 10088 10089 static void process_smi(struct kvm_vcpu *vcpu) 10090 { 10091 vcpu->arch.smi_pending = true; 10092 kvm_make_request(KVM_REQ_EVENT, vcpu); 10093 } 10094 10095 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, 10096 unsigned long *vcpu_bitmap) 10097 { 10098 kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap); 10099 } 10100 10101 void kvm_make_scan_ioapic_request(struct kvm *kvm) 10102 { 10103 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); 10104 } 10105 10106 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) 10107 { 10108 struct kvm_lapic *apic = vcpu->arch.apic; 10109 bool activate; 10110 10111 if (!lapic_in_kernel(vcpu)) 10112 return; 10113 10114 down_read(&vcpu->kvm->arch.apicv_update_lock); 10115 preempt_disable(); 10116 10117 /* Do not activate APICV when APIC is disabled */ 10118 activate = kvm_vcpu_apicv_activated(vcpu) && 10119 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED); 10120 10121 if (apic->apicv_active == activate) 10122 goto out; 10123 10124 apic->apicv_active = activate; 10125 kvm_apic_update_apicv(vcpu); 10126 static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); 10127 10128 /* 10129 * When APICv gets disabled, we may still have injected interrupts 10130 * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was 10131 * still active when the interrupt got accepted. Make sure 10132 * inject_pending_event() is called to check for that. 10133 */ 10134 if (!apic->apicv_active) 10135 kvm_make_request(KVM_REQ_EVENT, vcpu); 10136 10137 out: 10138 preempt_enable(); 10139 up_read(&vcpu->kvm->arch.apicv_update_lock); 10140 } 10141 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); 10142 10143 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, 10144 enum kvm_apicv_inhibit reason, bool set) 10145 { 10146 unsigned long old, new; 10147 10148 lockdep_assert_held_write(&kvm->arch.apicv_update_lock); 10149 10150 if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(reason)) 10151 return; 10152 10153 old = new = kvm->arch.apicv_inhibit_reasons; 10154 10155 set_or_clear_apicv_inhibit(&new, reason, set); 10156 10157 if (!!old != !!new) { 10158 /* 10159 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid 10160 * false positives in the sanity check WARN in svm_vcpu_run(). 
10161 * This task will wait for all vCPUs to ack the kick IRQ before 10162 * updating apicv_inhibit_reasons, and all other vCPUs will 10163 * block on acquiring apicv_update_lock so that vCPUs can't 10164 * redo svm_vcpu_run() without seeing the new inhibit state. 10165 * 10166 * Note, holding apicv_update_lock and taking it in the read 10167 * side (handling the request) also prevents other vCPUs from 10168 * servicing the request with a stale apicv_inhibit_reasons. 10169 */ 10170 kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE); 10171 kvm->arch.apicv_inhibit_reasons = new; 10172 if (new) { 10173 unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE); 10174 kvm_zap_gfn_range(kvm, gfn, gfn+1); 10175 } 10176 } else { 10177 kvm->arch.apicv_inhibit_reasons = new; 10178 } 10179 } 10180 10181 void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, 10182 enum kvm_apicv_inhibit reason, bool set) 10183 { 10184 if (!enable_apicv) 10185 return; 10186 10187 down_write(&kvm->arch.apicv_update_lock); 10188 __kvm_set_or_clear_apicv_inhibit(kvm, reason, set); 10189 up_write(&kvm->arch.apicv_update_lock); 10190 } 10191 EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit); 10192 10193 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) 10194 { 10195 if (!kvm_apic_present(vcpu)) 10196 return; 10197 10198 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); 10199 10200 if (irqchip_split(vcpu->kvm)) 10201 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); 10202 else { 10203 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10204 if (ioapic_in_kernel(vcpu->kvm)) 10205 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); 10206 } 10207 10208 if (is_guest_mode(vcpu)) 10209 vcpu->arch.load_eoi_exitmap_pending = true; 10210 else 10211 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); 10212 } 10213 10214 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) 10215 { 10216 u64 eoi_exit_bitmap[4]; 10217 10218 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 10219 return; 10220 10221 if (to_hv_vcpu(vcpu)) { 10222 bitmap_or((ulong *)eoi_exit_bitmap, 10223 vcpu->arch.ioapic_handled_vectors, 10224 to_hv_synic(vcpu)->vec_bitmap, 256); 10225 static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); 10226 return; 10227 } 10228 10229 static_call_cond(kvm_x86_load_eoi_exitmap)( 10230 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); 10231 } 10232 10233 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, 10234 unsigned long start, unsigned long end) 10235 { 10236 unsigned long apic_address; 10237 10238 /* 10239 * The physical address of apic access page is stored in the VMCS. 10240 * Update it when it becomes invalid. 10241 */ 10242 apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 10243 if (start <= apic_address && apic_address < end) 10244 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); 10245 } 10246 10247 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) 10248 { 10249 static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm); 10250 } 10251 10252 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) 10253 { 10254 if (!lapic_in_kernel(vcpu)) 10255 return; 10256 10257 static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu); 10258 } 10259 10260 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) 10261 { 10262 smp_send_reschedule(vcpu->cpu); 10263 } 10264 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); 10265 10266 /* 10267 * Called within kvm->srcu read side. 
10268 * Returns 1 to let vcpu_run() continue the guest execution loop without 10269 * exiting to the userspace. Otherwise, the value will be returned to the 10270 * userspace. 10271 */ 10272 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) 10273 { 10274 int r; 10275 bool req_int_win = 10276 dm_request_for_irq_injection(vcpu) && 10277 kvm_cpu_accept_dm_intr(vcpu); 10278 fastpath_t exit_fastpath; 10279 10280 bool req_immediate_exit = false; 10281 10282 /* Forbid vmenter if vcpu dirty ring is soft-full */ 10283 if (unlikely(vcpu->kvm->dirty_ring_size && 10284 kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) { 10285 vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL; 10286 trace_kvm_dirty_ring_exit(vcpu); 10287 r = 0; 10288 goto out; 10289 } 10290 10291 if (kvm_request_pending(vcpu)) { 10292 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) { 10293 r = -EIO; 10294 goto out; 10295 } 10296 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { 10297 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { 10298 r = 0; 10299 goto out; 10300 } 10301 } 10302 if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu)) 10303 kvm_mmu_free_obsolete_roots(vcpu); 10304 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) 10305 __kvm_migrate_timers(vcpu); 10306 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) 10307 kvm_update_masterclock(vcpu->kvm); 10308 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) 10309 kvm_gen_kvmclock_update(vcpu); 10310 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { 10311 r = kvm_guest_time_update(vcpu); 10312 if (unlikely(r)) 10313 goto out; 10314 } 10315 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 10316 kvm_mmu_sync_roots(vcpu); 10317 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) 10318 kvm_mmu_load_pgd(vcpu); 10319 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { 10320 kvm_vcpu_flush_tlb_all(vcpu); 10321 10322 /* Flushing all ASIDs flushes the current ASID... */ 10323 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 10324 } 10325 kvm_service_local_tlb_flush_requests(vcpu); 10326 10327 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { 10328 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; 10329 r = 0; 10330 goto out; 10331 } 10332 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 10333 if (is_guest_mode(vcpu)) { 10334 kvm_x86_ops.nested_ops->triple_fault(vcpu); 10335 } else { 10336 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 10337 vcpu->mmio_needed = 0; 10338 r = 0; 10339 goto out; 10340 } 10341 } 10342 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { 10343 /* Page is swapped out. 
Do synthetic halt */ 10344 vcpu->arch.apf.halted = true; 10345 r = 1; 10346 goto out; 10347 } 10348 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) 10349 record_steal_time(vcpu); 10350 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 10351 process_smi(vcpu); 10352 if (kvm_check_request(KVM_REQ_NMI, vcpu)) 10353 process_nmi(vcpu); 10354 if (kvm_check_request(KVM_REQ_PMU, vcpu)) 10355 kvm_pmu_handle_event(vcpu); 10356 if (kvm_check_request(KVM_REQ_PMI, vcpu)) 10357 kvm_pmu_deliver_pmi(vcpu); 10358 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { 10359 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); 10360 if (test_bit(vcpu->arch.pending_ioapic_eoi, 10361 vcpu->arch.ioapic_handled_vectors)) { 10362 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; 10363 vcpu->run->eoi.vector = 10364 vcpu->arch.pending_ioapic_eoi; 10365 r = 0; 10366 goto out; 10367 } 10368 } 10369 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 10370 vcpu_scan_ioapic(vcpu); 10371 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) 10372 vcpu_load_eoi_exitmap(vcpu); 10373 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) 10374 kvm_vcpu_reload_apic_access_page(vcpu); 10375 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { 10376 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10377 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; 10378 vcpu->run->system_event.ndata = 0; 10379 r = 0; 10380 goto out; 10381 } 10382 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { 10383 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10384 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; 10385 vcpu->run->system_event.ndata = 0; 10386 r = 0; 10387 goto out; 10388 } 10389 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { 10390 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 10391 10392 vcpu->run->exit_reason = KVM_EXIT_HYPERV; 10393 vcpu->run->hyperv = hv_vcpu->exit; 10394 r = 0; 10395 goto out; 10396 } 10397 10398 /* 10399 * KVM_REQ_HV_STIMER has to be processed after 10400 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers 10401 * depend on the guest clock being up-to-date 10402 */ 10403 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) 10404 kvm_hv_process_stimers(vcpu); 10405 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) 10406 kvm_vcpu_update_apicv(vcpu); 10407 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) 10408 kvm_check_async_pf_completion(vcpu); 10409 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) 10410 static_call(kvm_x86_msr_filter_changed)(vcpu); 10411 10412 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) 10413 static_call(kvm_x86_update_cpu_dirty_logging)(vcpu); 10414 } 10415 10416 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || 10417 kvm_xen_has_interrupt(vcpu)) { 10418 ++vcpu->stat.req_event; 10419 r = kvm_apic_accept_events(vcpu); 10420 if (r < 0) { 10421 r = 0; 10422 goto out; 10423 } 10424 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 10425 r = 1; 10426 goto out; 10427 } 10428 10429 r = inject_pending_event(vcpu, &req_immediate_exit); 10430 if (r < 0) { 10431 r = 0; 10432 goto out; 10433 } 10434 if (req_int_win) 10435 static_call(kvm_x86_enable_irq_window)(vcpu); 10436 10437 if (kvm_lapic_enabled(vcpu)) { 10438 update_cr8_intercept(vcpu); 10439 kvm_lapic_sync_to_vapic(vcpu); 10440 } 10441 } 10442 10443 r = kvm_mmu_reload(vcpu); 10444 if (unlikely(r)) { 10445 goto cancel_injection; 10446 } 10447 10448 preempt_disable(); 10449 10450 static_call(kvm_x86_prepare_switch_to_guest)(vcpu); 10451 10452 /* 10453 * Disable IRQs before setting IN_GUEST_MODE. 
Posted interrupt 10454 * IPIs are then delayed until after guest entry, which ensures that they 10455 * result in virtual interrupt delivery. 10456 */ 10457 local_irq_disable(); 10458 10459 /* Store vcpu->apicv_active before vcpu->mode. */ 10460 smp_store_release(&vcpu->mode, IN_GUEST_MODE); 10461 10462 kvm_vcpu_srcu_read_unlock(vcpu); 10463 10464 /* 10465 * 1) We should set ->mode before checking ->requests. Please see 10466 * the comment in kvm_vcpu_exiting_guest_mode(). 10467 * 10468 * 2) For APICv, we should set ->mode before checking PID.ON. This 10469 * pairs with the memory barrier implicit in pi_test_and_set_on 10470 * (see vmx_deliver_posted_interrupt). 10471 * 10472 * 3) This also orders the write to mode from any reads to the page 10473 * tables done while the VCPU is running. Please see the comment 10474 * in kvm_flush_remote_tlbs. 10475 */ 10476 smp_mb__after_srcu_read_unlock(); 10477 10478 /* 10479 * Process pending posted interrupts to handle the case where the 10480 * notification IRQ arrived in the host, or was never sent (because the 10481 * target vCPU wasn't running). Do this regardless of the vCPU's APICv 10482 * status, as KVM doesn't update assigned devices when APICv is inhibited, 10483 * i.e. they can post interrupts even if APICv is temporarily disabled. 10484 */ 10485 if (kvm_lapic_enabled(vcpu)) 10486 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10487 10488 if (kvm_vcpu_exit_request(vcpu)) { 10489 vcpu->mode = OUTSIDE_GUEST_MODE; 10490 smp_wmb(); 10491 local_irq_enable(); 10492 preempt_enable(); 10493 kvm_vcpu_srcu_read_lock(vcpu); 10494 r = 1; 10495 goto cancel_injection; 10496 } 10497 10498 if (req_immediate_exit) { 10499 kvm_make_request(KVM_REQ_EVENT, vcpu); 10500 static_call(kvm_x86_request_immediate_exit)(vcpu); 10501 } 10502 10503 fpregs_assert_state_consistent(); 10504 if (test_thread_flag(TIF_NEED_FPU_LOAD)) 10505 switch_fpu_return(); 10506 10507 if (vcpu->arch.guest_fpu.xfd_err) 10508 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); 10509 10510 if (unlikely(vcpu->arch.switch_db_regs)) { 10511 set_debugreg(0, 7); 10512 set_debugreg(vcpu->arch.eff_db[0], 0); 10513 set_debugreg(vcpu->arch.eff_db[1], 1); 10514 set_debugreg(vcpu->arch.eff_db[2], 2); 10515 set_debugreg(vcpu->arch.eff_db[3], 3); 10516 } else if (unlikely(hw_breakpoint_active())) { 10517 set_debugreg(0, 7); 10518 } 10519 10520 guest_timing_enter_irqoff(); 10521 10522 for (;;) { 10523 /* 10524 * Assert that vCPU vs. VM APICv state is consistent. An APICv 10525 * update must kick and wait for all vCPUs before toggling the 10526 * per-VM state, and responding vCPUs must wait for the update 10527 * to complete before servicing KVM_REQ_APICV_UPDATE. 10528 */ 10529 WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) && 10530 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED)); 10531 10532 exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu); 10533 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) 10534 break; 10535 10536 if (kvm_lapic_enabled(vcpu)) 10537 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10538 10539 if (unlikely(kvm_vcpu_exit_request(vcpu))) { 10540 exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; 10541 break; 10542 } 10543 } 10544 10545 /* 10546 * Do this here before restoring debug registers on the host. And 10547 * since we do this before handling the vmexit, a DR access vmexit 10548 * can (a) read the correct value of the debug registers, (b) set 10549 * KVM_DEBUGREG_WONT_EXIT again.
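* Concretely, the block below copies the guest's live debug registers * back out of hardware (sync_dirty_debug_regs) and refreshes KVM's * DR0-DR3/DR7 bookkeeping before hw_breakpoint_restore() reinstates * the host's values.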
10550 */ 10551 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { 10552 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); 10553 static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); 10554 kvm_update_dr0123(vcpu); 10555 kvm_update_dr7(vcpu); 10556 } 10557 10558 /* 10559 * If the guest has used debug registers, at least dr7 10560 * will be disabled while returning to the host. 10561 * If we don't have active breakpoints in the host, we don't 10562 * care about the messed up debug address registers. But if 10563 * we have some of them active, restore the old state. 10564 */ 10565 if (hw_breakpoint_active()) 10566 hw_breakpoint_restore(); 10567 10568 vcpu->arch.last_vmentry_cpu = vcpu->cpu; 10569 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 10570 10571 vcpu->mode = OUTSIDE_GUEST_MODE; 10572 smp_wmb(); 10573 10574 /* 10575 * Sync xfd before calling handle_exit_irqoff() which may 10576 * rely on the fact that guest_fpu::xfd is up-to-date (e.g. 10577 * in #NM irqoff handler). 10578 */ 10579 if (vcpu->arch.xfd_no_write_intercept) 10580 fpu_sync_guest_vmexit_xfd_state(); 10581 10582 static_call(kvm_x86_handle_exit_irqoff)(vcpu); 10583 10584 if (vcpu->arch.guest_fpu.xfd_err) 10585 wrmsrl(MSR_IA32_XFD_ERR, 0); 10586 10587 /* 10588 * Consume any pending interrupts, including the possible source of 10589 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. 10590 * An instruction is required after local_irq_enable() to fully unblock 10591 * interrupts on processors that implement an interrupt shadow, the 10592 * stat.exits increment will do nicely. 10593 */ 10594 kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ); 10595 local_irq_enable(); 10596 ++vcpu->stat.exits; 10597 local_irq_disable(); 10598 kvm_after_interrupt(vcpu); 10599 10600 /* 10601 * Wait until after servicing IRQs to account guest time so that any 10602 * ticks that occurred while running the guest are properly accounted 10603 * to the guest. Waiting until IRQs are enabled degrades the accuracy 10604 * of accounting via context tracking, but the loss of accuracy is 10605 * acceptable for all known use cases. 10606 */ 10607 guest_timing_exit_irqoff(); 10608 10609 local_irq_enable(); 10610 preempt_enable(); 10611 10612 kvm_vcpu_srcu_read_lock(vcpu); 10613 10614 /* 10615 * Profile KVM exit RIPs: 10616 */ 10617 if (unlikely(prof_on == KVM_PROFILING)) { 10618 unsigned long rip = kvm_rip_read(vcpu); 10619 profile_hit(KVM_PROFILING, (void *)rip); 10620 } 10621 10622 if (unlikely(vcpu->arch.tsc_always_catchup)) 10623 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 10624 10625 if (vcpu->arch.apic_attention) 10626 kvm_lapic_sync_from_vapic(vcpu); 10627 10628 r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); 10629 return r; 10630 10631 cancel_injection: 10632 if (req_immediate_exit) 10633 kvm_make_request(KVM_REQ_EVENT, vcpu); 10634 static_call(kvm_x86_cancel_injection)(vcpu); 10635 if (unlikely(vcpu->arch.apic_attention)) 10636 kvm_lapic_sync_from_vapic(vcpu); 10637 out: 10638 return r; 10639 } 10640 10641 /* Called within kvm->srcu read side. */ 10642 static inline int vcpu_block(struct kvm_vcpu *vcpu) 10643 { 10644 bool hv_timer; 10645 10646 if (!kvm_arch_vcpu_runnable(vcpu)) { 10647 /* 10648 * Switch to the software timer before halt-polling/blocking as 10649 * the guest's timer may be a break event for the vCPU, and the 10650 * hypervisor timer runs only when the CPU is in guest mode. 10651 * Switch before halt-polling so that KVM recognizes an expired 10652 * timer before blocking. 
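* The switch is undone once the vCPU unblocks, see the * kvm_lapic_switch_to_hv_timer() call below.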
10653 */ 10654 hv_timer = kvm_lapic_hv_timer_in_use(vcpu); 10655 if (hv_timer) 10656 kvm_lapic_switch_to_sw_timer(vcpu); 10657 10658 kvm_vcpu_srcu_read_unlock(vcpu); 10659 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 10660 kvm_vcpu_halt(vcpu); 10661 else 10662 kvm_vcpu_block(vcpu); 10663 kvm_vcpu_srcu_read_lock(vcpu); 10664 10665 if (hv_timer) 10666 kvm_lapic_switch_to_hv_timer(vcpu); 10667 10668 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) 10669 return 1; 10670 } 10671 10672 if (kvm_apic_accept_events(vcpu) < 0) 10673 return 0; 10674 switch(vcpu->arch.mp_state) { 10675 case KVM_MP_STATE_HALTED: 10676 case KVM_MP_STATE_AP_RESET_HOLD: 10677 vcpu->arch.pv.pv_unhalted = false; 10678 vcpu->arch.mp_state = 10679 KVM_MP_STATE_RUNNABLE; 10680 fallthrough; 10681 case KVM_MP_STATE_RUNNABLE: 10682 vcpu->arch.apf.halted = false; 10683 break; 10684 case KVM_MP_STATE_INIT_RECEIVED: 10685 break; 10686 default: 10687 return -EINTR; 10688 } 10689 return 1; 10690 } 10691 10692 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) 10693 { 10694 if (is_guest_mode(vcpu)) 10695 kvm_check_nested_events(vcpu); 10696 10697 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 10698 !vcpu->arch.apf.halted); 10699 } 10700 10701 /* Called within kvm->srcu read side. */ 10702 static int vcpu_run(struct kvm_vcpu *vcpu) 10703 { 10704 int r; 10705 10706 vcpu->arch.l1tf_flush_l1d = true; 10707 10708 for (;;) { 10709 /* 10710 * If another guest vCPU requests a PV TLB flush in the middle 10711 * of instruction emulation, the rest of the emulation could 10712 * use a stale page translation. Assume that any code after 10713 * this point can start executing an instruction. 10714 */ 10715 vcpu->arch.at_instruction_boundary = false; 10716 if (kvm_vcpu_running(vcpu)) { 10717 r = vcpu_enter_guest(vcpu); 10718 } else { 10719 r = vcpu_block(vcpu); 10720 } 10721 10722 if (r <= 0) 10723 break; 10724 10725 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu); 10726 if (kvm_xen_has_pending_events(vcpu)) 10727 kvm_xen_inject_pending_events(vcpu); 10728 10729 if (kvm_cpu_has_pending_timer(vcpu)) 10730 kvm_inject_pending_timer_irqs(vcpu); 10731 10732 if (dm_request_for_irq_injection(vcpu) && 10733 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { 10734 r = 0; 10735 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 10736 ++vcpu->stat.request_irq_exits; 10737 break; 10738 } 10739 10740 if (__xfer_to_guest_mode_work_pending()) { 10741 kvm_vcpu_srcu_read_unlock(vcpu); 10742 r = xfer_to_guest_mode_handle_work(vcpu); 10743 kvm_vcpu_srcu_read_lock(vcpu); 10744 if (r) 10745 return r; 10746 } 10747 } 10748 10749 return r; 10750 } 10751 10752 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) 10753 { 10754 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 10755 } 10756 10757 static int complete_emulated_pio(struct kvm_vcpu *vcpu) 10758 { 10759 BUG_ON(!vcpu->arch.pio.count); 10760 10761 return complete_emulated_io(vcpu); 10762 } 10763 10764 /* 10765 * Implements the following, as a state machine: 10766 * 10767 * read: 10768 * for each fragment 10769 * for each mmio piece in the fragment 10770 * write gpa, len 10771 * exit 10772 * copy data 10773 * execute insn 10774 * 10775 * write: 10776 * for each fragment 10777 * for each mmio piece in the fragment 10778 * write gpa, len 10779 * copy data 10780 * exit 10781 */ 10782 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) 10783 { 10784 struct kvm_run *run = vcpu->run; 10785 struct kvm_mmio_fragment *frag; 10786 unsigned len; 10787 10788 BUG_ON(!vcpu->mmio_needed); 10789 10790 /* 
Complete previous fragment */ 10791 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 10792 len = min(8u, frag->len); 10793 if (!vcpu->mmio_is_write) 10794 memcpy(frag->data, run->mmio.data, len); 10795 10796 if (frag->len <= 8) { 10797 /* Switch to the next fragment. */ 10798 frag++; 10799 vcpu->mmio_cur_fragment++; 10800 } else { 10801 /* Go forward to the next mmio piece. */ 10802 frag->data += len; 10803 frag->gpa += len; 10804 frag->len -= len; 10805 } 10806 10807 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 10808 vcpu->mmio_needed = 0; 10809 10810 /* FIXME: return into emulator if single-stepping. */ 10811 if (vcpu->mmio_is_write) 10812 return 1; 10813 vcpu->mmio_read_completed = 1; 10814 return complete_emulated_io(vcpu); 10815 } 10816 10817 run->exit_reason = KVM_EXIT_MMIO; 10818 run->mmio.phys_addr = frag->gpa; 10819 if (vcpu->mmio_is_write) 10820 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 10821 run->mmio.len = min(8u, frag->len); 10822 run->mmio.is_write = vcpu->mmio_is_write; 10823 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 10824 return 0; 10825 } 10826 10827 /* Swap (qemu) user FPU context for the guest FPU context. */ 10828 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 10829 { 10830 /* Exclude PKRU, it's restored separately immediately after VM-Exit. */ 10831 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true); 10832 trace_kvm_fpu(1); 10833 } 10834 10835 /* When vcpu_run ends, restore user space FPU context. */ 10836 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 10837 { 10838 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false); 10839 ++vcpu->stat.fpu_reload; 10840 trace_kvm_fpu(0); 10841 } 10842 10843 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) 10844 { 10845 struct kvm_run *kvm_run = vcpu->run; 10846 int r; 10847 10848 vcpu_load(vcpu); 10849 kvm_sigset_activate(vcpu); 10850 kvm_run->flags = 0; 10851 kvm_load_guest_fpu(vcpu); 10852 10853 kvm_vcpu_srcu_read_lock(vcpu); 10854 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { 10855 if (kvm_run->immediate_exit) { 10856 r = -EINTR; 10857 goto out; 10858 } 10859 /* 10860 * It should be impossible for the hypervisor timer to be in 10861 * use before KVM has ever run the vCPU. 
10862 */ 10863 WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu)); 10864 10865 kvm_vcpu_srcu_read_unlock(vcpu); 10866 kvm_vcpu_block(vcpu); 10867 kvm_vcpu_srcu_read_lock(vcpu); 10868 10869 if (kvm_apic_accept_events(vcpu) < 0) { 10870 r = 0; 10871 goto out; 10872 } 10873 kvm_clear_request(KVM_REQ_UNHALT, vcpu); 10874 r = -EAGAIN; 10875 if (signal_pending(current)) { 10876 r = -EINTR; 10877 kvm_run->exit_reason = KVM_EXIT_INTR; 10878 ++vcpu->stat.signal_exits; 10879 } 10880 goto out; 10881 } 10882 10883 if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) || 10884 (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) { 10885 r = -EINVAL; 10886 goto out; 10887 } 10888 10889 if (kvm_run->kvm_dirty_regs) { 10890 r = sync_regs(vcpu); 10891 if (r != 0) 10892 goto out; 10893 } 10894 10895 /* re-sync apic's tpr */ 10896 if (!lapic_in_kernel(vcpu)) { 10897 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { 10898 r = -EINVAL; 10899 goto out; 10900 } 10901 } 10902 10903 if (unlikely(vcpu->arch.complete_userspace_io)) { 10904 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; 10905 vcpu->arch.complete_userspace_io = NULL; 10906 r = cui(vcpu); 10907 if (r <= 0) 10908 goto out; 10909 } else { 10910 WARN_ON_ONCE(vcpu->arch.pio.count); 10911 WARN_ON_ONCE(vcpu->mmio_needed); 10912 } 10913 10914 if (kvm_run->immediate_exit) { 10915 r = -EINTR; 10916 goto out; 10917 } 10918 10919 r = static_call(kvm_x86_vcpu_pre_run)(vcpu); 10920 if (r <= 0) 10921 goto out; 10922 10923 r = vcpu_run(vcpu); 10924 10925 out: 10926 kvm_put_guest_fpu(vcpu); 10927 if (kvm_run->kvm_valid_regs) 10928 store_regs(vcpu); 10929 post_kvm_run_save(vcpu); 10930 kvm_vcpu_srcu_read_unlock(vcpu); 10931 10932 kvm_sigset_deactivate(vcpu); 10933 vcpu_put(vcpu); 10934 return r; 10935 } 10936 10937 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10938 { 10939 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { 10940 /* 10941 * We are here if userspace calls get_regs() in the middle of 10942 * instruction emulation. Registers state needs to be copied 10943 * back from emulation context to vcpu. 
Userspace shouldn't do 10944 * that usually, but some bad designed PV devices (vmware 10945 * backdoor interface) need this to work 10946 */ 10947 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); 10948 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 10949 } 10950 regs->rax = kvm_rax_read(vcpu); 10951 regs->rbx = kvm_rbx_read(vcpu); 10952 regs->rcx = kvm_rcx_read(vcpu); 10953 regs->rdx = kvm_rdx_read(vcpu); 10954 regs->rsi = kvm_rsi_read(vcpu); 10955 regs->rdi = kvm_rdi_read(vcpu); 10956 regs->rsp = kvm_rsp_read(vcpu); 10957 regs->rbp = kvm_rbp_read(vcpu); 10958 #ifdef CONFIG_X86_64 10959 regs->r8 = kvm_r8_read(vcpu); 10960 regs->r9 = kvm_r9_read(vcpu); 10961 regs->r10 = kvm_r10_read(vcpu); 10962 regs->r11 = kvm_r11_read(vcpu); 10963 regs->r12 = kvm_r12_read(vcpu); 10964 regs->r13 = kvm_r13_read(vcpu); 10965 regs->r14 = kvm_r14_read(vcpu); 10966 regs->r15 = kvm_r15_read(vcpu); 10967 #endif 10968 10969 regs->rip = kvm_rip_read(vcpu); 10970 regs->rflags = kvm_get_rflags(vcpu); 10971 } 10972 10973 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10974 { 10975 vcpu_load(vcpu); 10976 __get_regs(vcpu, regs); 10977 vcpu_put(vcpu); 10978 return 0; 10979 } 10980 10981 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10982 { 10983 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; 10984 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 10985 10986 kvm_rax_write(vcpu, regs->rax); 10987 kvm_rbx_write(vcpu, regs->rbx); 10988 kvm_rcx_write(vcpu, regs->rcx); 10989 kvm_rdx_write(vcpu, regs->rdx); 10990 kvm_rsi_write(vcpu, regs->rsi); 10991 kvm_rdi_write(vcpu, regs->rdi); 10992 kvm_rsp_write(vcpu, regs->rsp); 10993 kvm_rbp_write(vcpu, regs->rbp); 10994 #ifdef CONFIG_X86_64 10995 kvm_r8_write(vcpu, regs->r8); 10996 kvm_r9_write(vcpu, regs->r9); 10997 kvm_r10_write(vcpu, regs->r10); 10998 kvm_r11_write(vcpu, regs->r11); 10999 kvm_r12_write(vcpu, regs->r12); 11000 kvm_r13_write(vcpu, regs->r13); 11001 kvm_r14_write(vcpu, regs->r14); 11002 kvm_r15_write(vcpu, regs->r15); 11003 #endif 11004 11005 kvm_rip_write(vcpu, regs->rip); 11006 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); 11007 11008 vcpu->arch.exception.pending = false; 11009 11010 kvm_make_request(KVM_REQ_EVENT, vcpu); 11011 } 11012 11013 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 11014 { 11015 vcpu_load(vcpu); 11016 __set_regs(vcpu, regs); 11017 vcpu_put(vcpu); 11018 return 0; 11019 } 11020 11021 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11022 { 11023 struct desc_ptr dt; 11024 11025 if (vcpu->arch.guest_state_protected) 11026 goto skip_protected_regs; 11027 11028 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 11029 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 11030 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); 11031 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 11032 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 11033 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 11034 11035 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 11036 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 11037 11038 static_call(kvm_x86_get_idt)(vcpu, &dt); 11039 sregs->idt.limit = dt.size; 11040 sregs->idt.base = dt.address; 11041 static_call(kvm_x86_get_gdt)(vcpu, &dt); 11042 sregs->gdt.limit = dt.size; 11043 sregs->gdt.base = dt.address; 11044 11045 sregs->cr2 = vcpu->arch.cr2; 11046 sregs->cr3 = kvm_read_cr3(vcpu); 11047 11048 skip_protected_regs: 11049 sregs->cr0 = kvm_read_cr0(vcpu); 11050 sregs->cr4 = 
kvm_read_cr4(vcpu); 11051 sregs->cr8 = kvm_get_cr8(vcpu); 11052 sregs->efer = vcpu->arch.efer; 11053 sregs->apic_base = kvm_get_apic_base(vcpu); 11054 } 11055 11056 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11057 { 11058 __get_sregs_common(vcpu, sregs); 11059 11060 if (vcpu->arch.guest_state_protected) 11061 return; 11062 11063 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) 11064 set_bit(vcpu->arch.interrupt.nr, 11065 (unsigned long *)sregs->interrupt_bitmap); 11066 } 11067 11068 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 11069 { 11070 int i; 11071 11072 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2); 11073 11074 if (vcpu->arch.guest_state_protected) 11075 return; 11076 11077 if (is_pae_paging(vcpu)) { 11078 for (i = 0 ; i < 4 ; i++) 11079 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); 11080 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; 11081 } 11082 } 11083 11084 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 11085 struct kvm_sregs *sregs) 11086 { 11087 vcpu_load(vcpu); 11088 __get_sregs(vcpu, sregs); 11089 vcpu_put(vcpu); 11090 return 0; 11091 } 11092 11093 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 11094 struct kvm_mp_state *mp_state) 11095 { 11096 int r; 11097 11098 vcpu_load(vcpu); 11099 if (kvm_mpx_supported()) 11100 kvm_load_guest_fpu(vcpu); 11101 11102 r = kvm_apic_accept_events(vcpu); 11103 if (r < 0) 11104 goto out; 11105 r = 0; 11106 11107 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || 11108 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && 11109 vcpu->arch.pv.pv_unhalted) 11110 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; 11111 else 11112 mp_state->mp_state = vcpu->arch.mp_state; 11113 11114 out: 11115 if (kvm_mpx_supported()) 11116 kvm_put_guest_fpu(vcpu); 11117 vcpu_put(vcpu); 11118 return r; 11119 } 11120 11121 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 11122 struct kvm_mp_state *mp_state) 11123 { 11124 int ret = -EINVAL; 11125 11126 vcpu_load(vcpu); 11127 11128 if (!lapic_in_kernel(vcpu) && 11129 mp_state->mp_state != KVM_MP_STATE_RUNNABLE) 11130 goto out; 11131 11132 /* 11133 * KVM_MP_STATE_INIT_RECEIVED means the processor is in 11134 * INIT state; latched init should be reported using 11135 * KVM_SET_VCPU_EVENTS, so reject it here. 
11136 */ 11137 if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) && 11138 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || 11139 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) 11140 goto out; 11141 11142 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 11143 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 11144 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); 11145 } else 11146 vcpu->arch.mp_state = mp_state->mp_state; 11147 kvm_make_request(KVM_REQ_EVENT, vcpu); 11148 11149 ret = 0; 11150 out: 11151 vcpu_put(vcpu); 11152 return ret; 11153 } 11154 11155 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, 11156 int reason, bool has_error_code, u32 error_code) 11157 { 11158 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 11159 int ret; 11160 11161 init_emulate_ctxt(vcpu); 11162 11163 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, 11164 has_error_code, error_code); 11165 if (ret) { 11166 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 11167 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 11168 vcpu->run->internal.ndata = 0; 11169 return 0; 11170 } 11171 11172 kvm_rip_write(vcpu, ctxt->eip); 11173 kvm_set_rflags(vcpu, ctxt->eflags); 11174 return 1; 11175 } 11176 EXPORT_SYMBOL_GPL(kvm_task_switch); 11177 11178 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11179 { 11180 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { 11181 /* 11182 * When EFER.LME and CR0.PG are set, the processor is in 11183 * 64-bit mode (though maybe in a 32-bit code segment). 11184 * CR4.PAE and EFER.LMA must be set. 11185 */ 11186 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) 11187 return false; 11188 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) 11189 return false; 11190 } else { 11191 /* 11192 * Not in 64-bit mode: EFER.LMA is clear and the code 11193 * segment cannot be 64-bit. 
11194 */ 11195 if (sregs->efer & EFER_LMA || sregs->cs.l) 11196 return false; 11197 } 11198 11199 return kvm_is_valid_cr4(vcpu, sregs->cr4); 11200 } 11201 11202 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, 11203 int *mmu_reset_needed, bool update_pdptrs) 11204 { 11205 struct msr_data apic_base_msr; 11206 int idx; 11207 struct desc_ptr dt; 11208 11209 if (!kvm_is_valid_sregs(vcpu, sregs)) 11210 return -EINVAL; 11211 11212 apic_base_msr.data = sregs->apic_base; 11213 apic_base_msr.host_initiated = true; 11214 if (kvm_set_apic_base(vcpu, &apic_base_msr)) 11215 return -EINVAL; 11216 11217 if (vcpu->arch.guest_state_protected) 11218 return 0; 11219 11220 dt.size = sregs->idt.limit; 11221 dt.address = sregs->idt.base; 11222 static_call(kvm_x86_set_idt)(vcpu, &dt); 11223 dt.size = sregs->gdt.limit; 11224 dt.address = sregs->gdt.base; 11225 static_call(kvm_x86_set_gdt)(vcpu, &dt); 11226 11227 vcpu->arch.cr2 = sregs->cr2; 11228 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; 11229 vcpu->arch.cr3 = sregs->cr3; 11230 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 11231 static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); 11232 11233 kvm_set_cr8(vcpu, sregs->cr8); 11234 11235 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 11236 static_call(kvm_x86_set_efer)(vcpu, sregs->efer); 11237 11238 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 11239 static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); 11240 vcpu->arch.cr0 = sregs->cr0; 11241 11242 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 11243 static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); 11244 11245 if (update_pdptrs) { 11246 idx = srcu_read_lock(&vcpu->kvm->srcu); 11247 if (is_pae_paging(vcpu)) { 11248 load_pdptrs(vcpu, kvm_read_cr3(vcpu)); 11249 *mmu_reset_needed = 1; 11250 } 11251 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11252 } 11253 11254 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 11255 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 11256 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); 11257 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 11258 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 11259 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 11260 11261 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 11262 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 11263 11264 update_cr8_intercept(vcpu); 11265 11266 /* Older userspace won't unhalt the vcpu on reset. 
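If the vCPU looks like it is sitting at the architectural reset vector (BSP in real mode, RIP 0xfff0 with CS selector 0xf000 and base 0xffff0000), force it to RUNNABLE on such userspace's behalf.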
*/ 11267 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && 11268 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 11269 !is_protmode(vcpu)) 11270 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 11271 11272 return 0; 11273 } 11274 11275 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11276 { 11277 int pending_vec, max_bits; 11278 int mmu_reset_needed = 0; 11279 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true); 11280 11281 if (ret) 11282 return ret; 11283 11284 if (mmu_reset_needed) 11285 kvm_mmu_reset_context(vcpu); 11286 11287 max_bits = KVM_NR_INTERRUPTS; 11288 pending_vec = find_first_bit( 11289 (const unsigned long *)sregs->interrupt_bitmap, max_bits); 11290 11291 if (pending_vec < max_bits) { 11292 kvm_queue_interrupt(vcpu, pending_vec, false); 11293 pr_debug("Set back pending irq %d\n", pending_vec); 11294 kvm_make_request(KVM_REQ_EVENT, vcpu); 11295 } 11296 return 0; 11297 } 11298 11299 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 11300 { 11301 int mmu_reset_needed = 0; 11302 bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; 11303 bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) && 11304 !(sregs2->efer & EFER_LMA); 11305 int i, ret; 11306 11307 if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID) 11308 return -EINVAL; 11309 11310 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) 11311 return -EINVAL; 11312 11313 ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2, 11314 &mmu_reset_needed, !valid_pdptrs); 11315 if (ret) 11316 return ret; 11317 11318 if (valid_pdptrs) { 11319 for (i = 0; i < 4 ; i++) 11320 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]); 11321 11322 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); 11323 mmu_reset_needed = 1; 11324 vcpu->arch.pdptrs_from_userspace = true; 11325 } 11326 if (mmu_reset_needed) 11327 kvm_mmu_reset_context(vcpu); 11328 return 0; 11329 } 11330 11331 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 11332 struct kvm_sregs *sregs) 11333 { 11334 int ret; 11335 11336 vcpu_load(vcpu); 11337 ret = __set_sregs(vcpu, sregs); 11338 vcpu_put(vcpu); 11339 return ret; 11340 } 11341 11342 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm) 11343 { 11344 bool set = false; 11345 struct kvm_vcpu *vcpu; 11346 unsigned long i; 11347 11348 if (!enable_apicv) 11349 return; 11350 11351 down_write(&kvm->arch.apicv_update_lock); 11352 11353 kvm_for_each_vcpu(i, vcpu, kvm) { 11354 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) { 11355 set = true; 11356 break; 11357 } 11358 } 11359 __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set); 11360 up_write(&kvm->arch.apicv_update_lock); 11361 } 11362 11363 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 11364 struct kvm_guest_debug *dbg) 11365 { 11366 unsigned long rflags; 11367 int i, r; 11368 11369 if (vcpu->arch.guest_state_protected) 11370 return -EINVAL; 11371 11372 vcpu_load(vcpu); 11373 11374 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { 11375 r = -EBUSY; 11376 if (vcpu->arch.exception.pending) 11377 goto out; 11378 if (dbg->control & KVM_GUESTDBG_INJECT_DB) 11379 kvm_queue_exception(vcpu, DB_VECTOR); 11380 else 11381 kvm_queue_exception(vcpu, BP_VECTOR); 11382 } 11383 11384 /* 11385 * Read rflags as long as potentially injected trace flags are still 11386 * filtered out. 
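* That is, kvm_get_rflags() hides the TF bit that KVM itself injects * for single-stepping, so the value captured here contains only * guest-owned flags; it is written back via kvm_set_rflags() below * once the new guest_debug settings are in place.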
11387 */ 11388 rflags = kvm_get_rflags(vcpu); 11389 11390 vcpu->guest_debug = dbg->control; 11391 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) 11392 vcpu->guest_debug = 0; 11393 11394 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 11395 for (i = 0; i < KVM_NR_DB_REGS; ++i) 11396 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; 11397 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; 11398 } else { 11399 for (i = 0; i < KVM_NR_DB_REGS; i++) 11400 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 11401 } 11402 kvm_update_dr7(vcpu); 11403 11404 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 11405 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); 11406 11407 /* 11408 * Trigger an rflags update that will inject or remove the trace 11409 * flags. 11410 */ 11411 kvm_set_rflags(vcpu, rflags); 11412 11413 static_call(kvm_x86_update_exception_bitmap)(vcpu); 11414 11415 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm); 11416 11417 r = 0; 11418 11419 out: 11420 vcpu_put(vcpu); 11421 return r; 11422 } 11423 11424 /* 11425 * Translate a guest virtual address to a guest physical address. 11426 */ 11427 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 11428 struct kvm_translation *tr) 11429 { 11430 unsigned long vaddr = tr->linear_address; 11431 gpa_t gpa; 11432 int idx; 11433 11434 vcpu_load(vcpu); 11435 11436 idx = srcu_read_lock(&vcpu->kvm->srcu); 11437 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); 11438 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11439 tr->physical_address = gpa; 11440 tr->valid = gpa != INVALID_GPA; 11441 tr->writeable = 1; 11442 tr->usermode = 0; 11443 11444 vcpu_put(vcpu); 11445 return 0; 11446 } 11447 11448 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 11449 { 11450 struct fxregs_state *fxsave; 11451 11452 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 11453 return 0; 11454 11455 vcpu_load(vcpu); 11456 11457 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; 11458 memcpy(fpu->fpr, fxsave->st_space, 128); 11459 fpu->fcw = fxsave->cwd; 11460 fpu->fsw = fxsave->swd; 11461 fpu->ftwx = fxsave->twd; 11462 fpu->last_opcode = fxsave->fop; 11463 fpu->last_ip = fxsave->rip; 11464 fpu->last_dp = fxsave->rdp; 11465 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); 11466 11467 vcpu_put(vcpu); 11468 return 0; 11469 } 11470 11471 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 11472 { 11473 struct fxregs_state *fxsave; 11474 11475 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 11476 return 0; 11477 11478 vcpu_load(vcpu); 11479 11480 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; 11481 11482 memcpy(fxsave->st_space, fpu->fpr, 128); 11483 fxsave->cwd = fpu->fcw; 11484 fxsave->swd = fpu->fsw; 11485 fxsave->twd = fpu->ftwx; 11486 fxsave->fop = fpu->last_opcode; 11487 fxsave->rip = fpu->last_ip; 11488 fxsave->rdp = fpu->last_dp; 11489 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); 11490 11491 vcpu_put(vcpu); 11492 return 0; 11493 } 11494 11495 static void store_regs(struct kvm_vcpu *vcpu) 11496 { 11497 BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES); 11498 11499 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) 11500 __get_regs(vcpu, &vcpu->run->s.regs.regs); 11501 11502 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) 11503 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); 11504 11505 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) 11506 kvm_vcpu_ioctl_x86_get_vcpu_events( 11507 vcpu, &vcpu->run->s.regs.events); 11508 } 11509 11510 static int 
sync_regs(struct kvm_vcpu *vcpu) 11511 { 11512 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { 11513 __set_regs(vcpu, &vcpu->run->s.regs.regs); 11514 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; 11515 } 11516 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { 11517 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) 11518 return -EINVAL; 11519 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; 11520 } 11521 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { 11522 if (kvm_vcpu_ioctl_x86_set_vcpu_events( 11523 vcpu, &vcpu->run->s.regs.events)) 11524 return -EINVAL; 11525 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; 11526 } 11527 11528 return 0; 11529 } 11530 11531 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) 11532 { 11533 if (kvm_check_tsc_unstable() && kvm->created_vcpus) 11534 pr_warn_once("kvm: SMP vm created on host with unstable TSC; " 11535 "guest TSC will not be reliable\n"); 11536 11537 if (!kvm->arch.max_vcpu_ids) 11538 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS; 11539 11540 if (id >= kvm->arch.max_vcpu_ids) 11541 return -EINVAL; 11542 11543 return static_call(kvm_x86_vcpu_precreate)(kvm); 11544 } 11545 11546 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) 11547 { 11548 struct page *page; 11549 int r; 11550 11551 vcpu->arch.last_vmentry_cpu = -1; 11552 vcpu->arch.regs_avail = ~0; 11553 vcpu->arch.regs_dirty = ~0; 11554 11555 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) 11556 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 11557 else 11558 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; 11559 11560 r = kvm_mmu_create(vcpu); 11561 if (r < 0) 11562 return r; 11563 11564 if (irqchip_in_kernel(vcpu->kvm)) { 11565 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); 11566 if (r < 0) 11567 goto fail_mmu_destroy; 11568 11569 /* 11570 * Defer evaluating inhibits until the vCPU is first run, as 11571 * this vCPU will not get notified of any changes until this 11572 * vCPU is visible to other vCPUs (marked online and added to 11573 * the set of vCPUs). Opportunistically mark APICv active as 11574 * VMX in particular is highly unlikely to have inhibits. 11575 * Ignore the current per-VM APICv state so that vCPU creation 11576 * is guaranteed to run with a deterministic value; the request 11577 * will ensure the vCPU gets the correct state before VM-Entry.
11578 */ 11579 if (enable_apicv) { 11580 vcpu->arch.apic->apicv_active = true; 11581 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); 11582 } 11583 } else 11584 static_branch_inc(&kvm_has_noapic_vcpu); 11585 11586 r = -ENOMEM; 11587 11588 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 11589 if (!page) 11590 goto fail_free_lapic; 11591 vcpu->arch.pio_data = page_address(page); 11592 11593 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64), 11594 GFP_KERNEL_ACCOUNT); 11595 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64), 11596 GFP_KERNEL_ACCOUNT); 11597 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks) 11598 goto fail_free_pio_data; 11599 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; 11600 11601 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, 11602 GFP_KERNEL_ACCOUNT)) 11603 goto fail_free_mce_banks; 11604 11605 if (!alloc_emulate_ctxt(vcpu)) 11606 goto free_wbinvd_dirty_mask; 11607 11608 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { 11609 pr_err("kvm: failed to allocate vcpu's fpu\n"); 11610 goto free_emulate_ctxt; 11611 } 11612 11613 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); 11614 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); 11615 11616 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; 11617 11618 kvm_async_pf_hash_reset(vcpu); 11619 kvm_pmu_init(vcpu); 11620 11621 vcpu->arch.pending_external_vector = -1; 11622 vcpu->arch.preempted_in_kernel = false; 11623 11624 #if IS_ENABLED(CONFIG_HYPERV) 11625 vcpu->arch.hv_root_tdp = INVALID_PAGE; 11626 #endif 11627 11628 r = static_call(kvm_x86_vcpu_create)(vcpu); 11629 if (r) 11630 goto free_guest_fpu; 11631 11632 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); 11633 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; 11634 kvm_xen_init_vcpu(vcpu); 11635 kvm_vcpu_mtrr_init(vcpu); 11636 vcpu_load(vcpu); 11637 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz); 11638 kvm_vcpu_reset(vcpu, false); 11639 kvm_init_mmu(vcpu); 11640 vcpu_put(vcpu); 11641 return 0; 11642 11643 free_guest_fpu: 11644 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); 11645 free_emulate_ctxt: 11646 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 11647 free_wbinvd_dirty_mask: 11648 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 11649 fail_free_mce_banks: 11650 kfree(vcpu->arch.mce_banks); 11651 kfree(vcpu->arch.mci_ctl2_banks); 11652 fail_free_pio_data: 11653 free_page((unsigned long)vcpu->arch.pio_data); 11654 fail_free_lapic: 11655 kvm_free_lapic(vcpu); 11656 fail_mmu_destroy: 11657 kvm_mmu_destroy(vcpu); 11658 return r; 11659 } 11660 11661 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 11662 { 11663 struct kvm *kvm = vcpu->kvm; 11664 11665 if (mutex_lock_killable(&vcpu->mutex)) 11666 return; 11667 vcpu_load(vcpu); 11668 kvm_synchronize_tsc(vcpu, 0); 11669 vcpu_put(vcpu); 11670 11671 /* poll control enabled by default */ 11672 vcpu->arch.msr_kvm_poll_control = 1; 11673 11674 mutex_unlock(&vcpu->mutex); 11675 11676 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) 11677 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 11678 KVMCLOCK_SYNC_PERIOD); 11679 } 11680 11681 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 11682 { 11683 int idx; 11684 11685 kvmclock_reset(vcpu); 11686 11687 static_call(kvm_x86_vcpu_free)(vcpu); 11688 11689 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 11690 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 11691 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); 11692 11693 kvm_xen_destroy_vcpu(vcpu); 11694 
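	/*
	 * Tear down the remaining per-vCPU state; note that kvm_mmu_destroy()
	 * below is called under the kvm->srcu read lock.
	 */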
kvm_hv_vcpu_uninit(vcpu); 11695 kvm_pmu_destroy(vcpu); 11696 kfree(vcpu->arch.mce_banks); 11697 kfree(vcpu->arch.mci_ctl2_banks); 11698 kvm_free_lapic(vcpu); 11699 idx = srcu_read_lock(&vcpu->kvm->srcu); 11700 kvm_mmu_destroy(vcpu); 11701 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11702 free_page((unsigned long)vcpu->arch.pio_data); 11703 kvfree(vcpu->arch.cpuid_entries); 11704 if (!lapic_in_kernel(vcpu)) 11705 static_branch_dec(&kvm_has_noapic_vcpu); 11706 } 11707 11708 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 11709 { 11710 struct kvm_cpuid_entry2 *cpuid_0x1; 11711 unsigned long old_cr0 = kvm_read_cr0(vcpu); 11712 unsigned long new_cr0; 11713 11714 /* 11715 * Several of the "set" flows, e.g. ->set_cr0(), read other registers 11716 * to handle side effects. RESET emulation hits those flows and relies 11717 * on emulated/virtualized registers, including those that are loaded 11718 * into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel 11719 * to detect improper or missing initialization. 11720 */ 11721 WARN_ON_ONCE(!init_event && 11722 (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu))); 11723 11724 kvm_lapic_reset(vcpu, init_event); 11725 11726 vcpu->arch.hflags = 0; 11727 11728 vcpu->arch.smi_pending = 0; 11729 vcpu->arch.smi_count = 0; 11730 atomic_set(&vcpu->arch.nmi_queued, 0); 11731 vcpu->arch.nmi_pending = 0; 11732 vcpu->arch.nmi_injected = false; 11733 kvm_clear_interrupt_queue(vcpu); 11734 kvm_clear_exception_queue(vcpu); 11735 11736 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); 11737 kvm_update_dr0123(vcpu); 11738 vcpu->arch.dr6 = DR6_ACTIVE_LOW; 11739 vcpu->arch.dr7 = DR7_FIXED_1; 11740 kvm_update_dr7(vcpu); 11741 11742 vcpu->arch.cr2 = 0; 11743 11744 kvm_make_request(KVM_REQ_EVENT, vcpu); 11745 vcpu->arch.apf.msr_en_val = 0; 11746 vcpu->arch.apf.msr_int_val = 0; 11747 vcpu->arch.st.msr_val = 0; 11748 11749 kvmclock_reset(vcpu); 11750 11751 kvm_clear_async_pf_completion_queue(vcpu); 11752 kvm_async_pf_hash_reset(vcpu); 11753 vcpu->arch.apf.halted = false; 11754 11755 if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) { 11756 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate; 11757 11758 /* 11759 * Avoid having the INIT path from kvm_apic_has_events() run with 11760 * the guest FPU loaded, as that would not let userspace fix the state. 11761 */ 11762 if (init_event) 11763 kvm_put_guest_fpu(vcpu); 11764 11765 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS); 11766 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR); 11767 11768 if (init_event) 11769 kvm_load_guest_fpu(vcpu); 11770 } 11771 11772 if (!init_event) { 11773 kvm_pmu_reset(vcpu); 11774 vcpu->arch.smbase = 0x30000; 11775 11776 vcpu->arch.msr_misc_features_enables = 0; 11777 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | 11778 MSR_IA32_MISC_ENABLE_BTS_UNAVAIL; 11779 11780 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP); 11781 __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true); 11782 } 11783 11784 /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */ 11785 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); 11786 kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP); 11787 11788 /* 11789 * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon) 11790 * if no CPUID match is found. Note, it's impossible to get a match at 11791 * RESET since KVM emulates RESET before exposing the vCPU to userspace, 11792 * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry 11793 * on RESET.
But, go through the motions in case that's ever remedied. 11794 */ 11795 cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1); 11796 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); 11797 11798 static_call(kvm_x86_vcpu_reset)(vcpu, init_event); 11799 11800 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 11801 kvm_rip_write(vcpu, 0xfff0); 11802 11803 vcpu->arch.cr3 = 0; 11804 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 11805 11806 /* 11807 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions 11808 * of Intel's SDM list CD/NW as being set on INIT, but they contradict 11809 * (or qualify) that with a footnote stating that CD/NW are preserved. 11810 */ 11811 new_cr0 = X86_CR0_ET; 11812 if (init_event) 11813 new_cr0 |= (old_cr0 & (X86_CR0_NW | X86_CR0_CD)); 11814 else 11815 new_cr0 |= X86_CR0_NW | X86_CR0_CD; 11816 11817 static_call(kvm_x86_set_cr0)(vcpu, new_cr0); 11818 static_call(kvm_x86_set_cr4)(vcpu, 0); 11819 static_call(kvm_x86_set_efer)(vcpu, 0); 11820 static_call(kvm_x86_update_exception_bitmap)(vcpu); 11821 11822 /* 11823 * On the standard CR0/CR4/EFER modification paths, there are several 11824 * complex conditions determining whether the MMU has to be reset and/or 11825 * which PCIDs have to be flushed. However, CR0.WP and the paging-related 11826 * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush 11827 * is needed anyway if CR0.PG was '1' (which can only happen for INIT, as 11828 * CR0 will be '0' prior to RESET). So we only need to check CR0.PG here. 11829 */ 11830 if (old_cr0 & X86_CR0_PG) { 11831 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 11832 kvm_mmu_reset_context(vcpu); 11833 } 11834 11835 /* 11836 * Intel's SDM states that all TLB entries are flushed on INIT. AMD's 11837 * APM states the TLBs are untouched by INIT, but it also states that 11838 * the TLBs are flushed on "External initialization of the processor." 11839 * Flush the guest TLB regardless of vendor, there is no meaningful 11840 * benefit in relying on the guest to flush the TLB immediately after 11841 * INIT. A spurious TLB flush is benign and likely negligible from a 11842 * performance perspective. 
11843 */ 11844 if (init_event) 11845 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 11846 } 11847 EXPORT_SYMBOL_GPL(kvm_vcpu_reset); 11848 11849 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 11850 { 11851 struct kvm_segment cs; 11852 11853 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 11854 cs.selector = vector << 8; 11855 cs.base = vector << 12; 11856 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 11857 kvm_rip_write(vcpu, 0); 11858 } 11859 EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector); 11860 11861 int kvm_arch_hardware_enable(void) 11862 { 11863 struct kvm *kvm; 11864 struct kvm_vcpu *vcpu; 11865 unsigned long i; 11866 int ret; 11867 u64 local_tsc; 11868 u64 max_tsc = 0; 11869 bool stable, backwards_tsc = false; 11870 11871 kvm_user_return_msr_cpu_online(); 11872 ret = static_call(kvm_x86_hardware_enable)(); 11873 if (ret != 0) 11874 return ret; 11875 11876 local_tsc = rdtsc(); 11877 stable = !kvm_check_tsc_unstable(); 11878 list_for_each_entry(kvm, &vm_list, vm_list) { 11879 kvm_for_each_vcpu(i, vcpu, kvm) { 11880 if (!stable && vcpu->cpu == smp_processor_id()) 11881 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 11882 if (stable && vcpu->arch.last_host_tsc > local_tsc) { 11883 backwards_tsc = true; 11884 if (vcpu->arch.last_host_tsc > max_tsc) 11885 max_tsc = vcpu->arch.last_host_tsc; 11886 } 11887 } 11888 } 11889 11890 /* 11891 * Sometimes, even reliable TSCs go backwards. This happens on 11892 * platforms that reset TSC during suspend or hibernate actions, but 11893 * maintain synchronization. We must compensate. Fortunately, we can 11894 * detect that condition here, which happens early in CPU bringup, 11895 * before any KVM threads can be running. Unfortunately, we can't 11896 * bring the TSCs fully up to date with real time, as we aren't yet far 11897 * enough into CPU bringup that we know how much real time has actually 11898 * elapsed; our helper function, ktime_get_boottime_ns() will be using boot 11899 * variables that haven't been updated yet. 11900 * 11901 * So we simply find the maximum observed TSC above, then record the 11902 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, 11903 * the adjustment will be applied. Note that we accumulate 11904 * adjustments, in case multiple suspend cycles happen before some VCPU 11905 * gets a chance to run again. In the event that no KVM threads get a 11906 * chance to run, we will miss the entire elapsed period, as we'll have 11907 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may 11908 * loose cycle time. This isn't too big a deal, since the loss will be 11909 * uniform across all VCPUs (not to mention the scenario is extremely 11910 * unlikely). It is possible that a second hibernate recovery happens 11911 * much faster than a first, causing the observed TSC here to be 11912 * smaller; this would require additional padding adjustment, which is 11913 * why we set last_host_tsc to the local tsc observed here. 11914 * 11915 * N.B. - this code below runs only on platforms with reliable TSC, 11916 * as that is the only way backwards_tsc is set above. Also note 11917 * that this runs for ALL vcpus, which is not a bug; all VCPUs should 11918 * have the same delta_cyc adjustment applied if backwards_tsc 11919 * is detected. Note further, this adjustment is only done once, 11920 * as we reset last_host_tsc on all VCPUs to stop this from being 11921 * called multiple times (one for each physical CPU bringup). 
11922 * 11923 * Platforms with unreliable TSCs don't have to deal with this, they 11924 * will be compensated by the logic in vcpu_load, which sets the TSC to 11925 * catchup mode. This will catchup all VCPUs to real time, but cannot 11926 * guarantee that they stay in perfect synchronization. 11927 */ 11928 if (backwards_tsc) { 11929 u64 delta_cyc = max_tsc - local_tsc; 11930 list_for_each_entry(kvm, &vm_list, vm_list) { 11931 kvm->arch.backwards_tsc_observed = true; 11932 kvm_for_each_vcpu(i, vcpu, kvm) { 11933 vcpu->arch.tsc_offset_adjustment += delta_cyc; 11934 vcpu->arch.last_host_tsc = local_tsc; 11935 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 11936 } 11937 11938 /* 11939 * We have to disable TSC offset matching.. if you were 11940 * booting a VM while issuing an S4 host suspend.... 11941 * you may have some problem. Solving this issue is 11942 * left as an exercise to the reader. 11943 */ 11944 kvm->arch.last_tsc_nsec = 0; 11945 kvm->arch.last_tsc_write = 0; 11946 } 11947 11948 } 11949 return 0; 11950 } 11951 11952 void kvm_arch_hardware_disable(void) 11953 { 11954 static_call(kvm_x86_hardware_disable)(); 11955 drop_user_return_notifiers(); 11956 } 11957 11958 static inline void kvm_ops_update(struct kvm_x86_init_ops *ops) 11959 { 11960 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); 11961 11962 #define __KVM_X86_OP(func) \ 11963 static_call_update(kvm_x86_##func, kvm_x86_ops.func); 11964 #define KVM_X86_OP(func) \ 11965 WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func) 11966 #define KVM_X86_OP_OPTIONAL __KVM_X86_OP 11967 #define KVM_X86_OP_OPTIONAL_RET0(func) \ 11968 static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \ 11969 (void *)__static_call_return0); 11970 #include <asm/kvm-x86-ops.h> 11971 #undef __KVM_X86_OP 11972 11973 kvm_pmu_ops_update(ops->pmu_ops); 11974 } 11975 11976 int kvm_arch_hardware_setup(void *opaque) 11977 { 11978 struct kvm_x86_init_ops *ops = opaque; 11979 int r; 11980 11981 rdmsrl_safe(MSR_EFER, &host_efer); 11982 11983 if (boot_cpu_has(X86_FEATURE_XSAVES)) 11984 rdmsrl(MSR_IA32_XSS, host_xss); 11985 11986 kvm_init_pmu_capability(); 11987 11988 r = ops->hardware_setup(); 11989 if (r != 0) 11990 return r; 11991 11992 kvm_ops_update(ops); 11993 11994 kvm_register_perf_callbacks(ops->handle_intel_pt_intr); 11995 11996 if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) 11997 kvm_caps.supported_xss = 0; 11998 11999 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f) 12000 cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_); 12001 #undef __kvm_cpu_cap_has 12002 12003 if (kvm_caps.has_tsc_control) { 12004 /* 12005 * Make sure the user can only configure tsc_khz values that 12006 * fit into a signed integer. 12007 * A min value is not calculated because it will always 12008 * be 1 on all machines. 
12009 */ 12010 u64 max = min(0x7fffffffULL, 12011 __scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz)); 12012 kvm_caps.max_guest_tsc_khz = max; 12013 } 12014 kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits; 12015 kvm_init_msr_list(); 12016 return 0; 12017 } 12018 12019 void kvm_arch_hardware_unsetup(void) 12020 { 12021 kvm_unregister_perf_callbacks(); 12022 12023 static_call(kvm_x86_hardware_unsetup)(); 12024 } 12025 12026 int kvm_arch_check_processor_compat(void *opaque) 12027 { 12028 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); 12029 struct kvm_x86_init_ops *ops = opaque; 12030 12031 WARN_ON(!irqs_disabled()); 12032 12033 if (__cr4_reserved_bits(cpu_has, c) != 12034 __cr4_reserved_bits(cpu_has, &boot_cpu_data)) 12035 return -EIO; 12036 12037 return ops->check_processor_compatibility(); 12038 } 12039 12040 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) 12041 { 12042 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; 12043 } 12044 EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp); 12045 12046 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) 12047 { 12048 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; 12049 } 12050 12051 __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); 12052 EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); 12053 12054 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) 12055 { 12056 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 12057 12058 vcpu->arch.l1tf_flush_l1d = true; 12059 if (pmu->version && unlikely(pmu->event_count)) { 12060 pmu->need_cleanup = true; 12061 kvm_make_request(KVM_REQ_PMU, vcpu); 12062 } 12063 static_call(kvm_x86_sched_in)(vcpu, cpu); 12064 } 12065 12066 void kvm_arch_free_vm(struct kvm *kvm) 12067 { 12068 kfree(to_kvm_hv(kvm)->hv_pa_pg); 12069 __kvm_arch_free_vm(kvm); 12070 } 12071 12072 12073 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 12074 { 12075 int ret; 12076 unsigned long flags; 12077 12078 if (type) 12079 return -EINVAL; 12080 12081 ret = kvm_page_track_init(kvm); 12082 if (ret) 12083 goto out; 12084 12085 ret = kvm_mmu_init_vm(kvm); 12086 if (ret) 12087 goto out_page_track; 12088 12089 ret = static_call(kvm_x86_vm_init)(kvm); 12090 if (ret) 12091 goto out_uninit_mmu; 12092 12093 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); 12094 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 12095 atomic_set(&kvm->arch.noncoherent_dma_count, 0); 12096 12097 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 12098 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); 12099 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ 12100 set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, 12101 &kvm->arch.irq_sources_bitmap); 12102 12103 raw_spin_lock_init(&kvm->arch.tsc_write_lock); 12104 mutex_init(&kvm->arch.apic_map_lock); 12105 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock); 12106 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); 12107 12108 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 12109 pvclock_update_vm_gtod_copy(kvm); 12110 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 12111 12112 kvm->arch.default_tsc_khz = max_tsc_khz ? 
: tsc_khz; 12113 kvm->arch.guest_can_read_msr_platform_info = true; 12114 kvm->arch.enable_pmu = enable_pmu; 12115 12116 #if IS_ENABLED(CONFIG_HYPERV) 12117 spin_lock_init(&kvm->arch.hv_root_tdp_lock); 12118 kvm->arch.hv_root_tdp = INVALID_PAGE; 12119 #endif 12120 12121 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); 12122 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); 12123 12124 kvm_apicv_init(kvm); 12125 kvm_hv_init_vm(kvm); 12126 kvm_xen_init_vm(kvm); 12127 12128 return 0; 12129 12130 out_uninit_mmu: 12131 kvm_mmu_uninit_vm(kvm); 12132 out_page_track: 12133 kvm_page_track_cleanup(kvm); 12134 out: 12135 return ret; 12136 } 12137 12138 int kvm_arch_post_init_vm(struct kvm *kvm) 12139 { 12140 return kvm_mmu_post_init_vm(kvm); 12141 } 12142 12143 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) 12144 { 12145 vcpu_load(vcpu); 12146 kvm_mmu_unload(vcpu); 12147 vcpu_put(vcpu); 12148 } 12149 12150 static void kvm_unload_vcpu_mmus(struct kvm *kvm) 12151 { 12152 unsigned long i; 12153 struct kvm_vcpu *vcpu; 12154 12155 kvm_for_each_vcpu(i, vcpu, kvm) { 12156 kvm_clear_async_pf_completion_queue(vcpu); 12157 kvm_unload_vcpu_mmu(vcpu); 12158 } 12159 } 12160 12161 void kvm_arch_sync_events(struct kvm *kvm) 12162 { 12163 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); 12164 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); 12165 kvm_free_pit(kvm); 12166 } 12167 12168 /** 12169 * __x86_set_memory_region: Setup KVM internal memory slot 12170 * 12171 * @kvm: the kvm pointer to the VM. 12172 * @id: the slot ID to setup. 12173 * @gpa: the GPA to install the slot (unused when @size == 0). 12174 * @size: the size of the slot. Set to zero to uninstall a slot. 12175 * 12176 * This function helps to setup a KVM internal memory slot. Specify 12177 * @size > 0 to install a new slot, while @size == 0 to uninstall a 12178 * slot. The return code can be one of the following: 12179 * 12180 * HVA: on success (uninstall will return a bogus HVA) 12181 * -errno: on error 12182 * 12183 * The caller should always use IS_ERR() to check the return value 12184 * before use. Note, the KVM internal memory slots are guaranteed to 12185 * remain valid and unchanged until the VM is destroyed, i.e., the 12186 * GPA->HVA translation will not change. However, the HVA is a user 12187 * address, i.e. its accessibility is not guaranteed, and must be 12188 * accessed via __copy_{to,from}_user(). 12189 */ 12190 void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, 12191 u32 size) 12192 { 12193 int i, r; 12194 unsigned long hva, old_npages; 12195 struct kvm_memslots *slots = kvm_memslots(kvm); 12196 struct kvm_memory_slot *slot; 12197 12198 /* Called with kvm->slots_lock held. */ 12199 if (WARN_ON(id >= KVM_MEM_SLOTS_NUM)) 12200 return ERR_PTR_USR(-EINVAL); 12201 12202 slot = id_to_memslot(slots, id); 12203 if (size) { 12204 if (slot && slot->npages) 12205 return ERR_PTR_USR(-EEXIST); 12206 12207 /* 12208 * MAP_SHARED to prevent internal slot pages from being moved 12209 * by fork()/COW. 
12210 */ 12211 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, 12212 MAP_SHARED | MAP_ANONYMOUS, 0); 12213 if (IS_ERR((void *)hva)) 12214 return (void __user *)hva; 12215 } else { 12216 if (!slot || !slot->npages) 12217 return NULL; 12218 12219 old_npages = slot->npages; 12220 hva = slot->userspace_addr; 12221 } 12222 12223 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 12224 struct kvm_userspace_memory_region m; 12225 12226 m.slot = id | (i << 16); 12227 m.flags = 0; 12228 m.guest_phys_addr = gpa; 12229 m.userspace_addr = hva; 12230 m.memory_size = size; 12231 r = __kvm_set_memory_region(kvm, &m); 12232 if (r < 0) 12233 return ERR_PTR_USR(r); 12234 } 12235 12236 if (!size) 12237 vm_munmap(hva, old_npages * PAGE_SIZE); 12238 12239 return (void __user *)hva; 12240 } 12241 EXPORT_SYMBOL_GPL(__x86_set_memory_region); 12242 12243 void kvm_arch_pre_destroy_vm(struct kvm *kvm) 12244 { 12245 kvm_mmu_pre_destroy_vm(kvm); 12246 } 12247 12248 void kvm_arch_destroy_vm(struct kvm *kvm) 12249 { 12250 if (current->mm == kvm->mm) { 12251 /* 12252 * Free memory regions allocated on behalf of userspace, 12253 * unless the memory map has changed due to process exit 12254 * or fd copying. 12255 */ 12256 mutex_lock(&kvm->slots_lock); 12257 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 12258 0, 0); 12259 __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 12260 0, 0); 12261 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); 12262 mutex_unlock(&kvm->slots_lock); 12263 } 12264 kvm_unload_vcpu_mmus(kvm); 12265 static_call_cond(kvm_x86_vm_destroy)(kvm); 12266 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); 12267 kvm_pic_destroy(kvm); 12268 kvm_ioapic_destroy(kvm); 12269 kvm_destroy_vcpus(kvm); 12270 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 12271 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); 12272 kvm_mmu_uninit_vm(kvm); 12273 kvm_page_track_cleanup(kvm); 12274 kvm_xen_destroy_vm(kvm); 12275 kvm_hv_destroy_vm(kvm); 12276 } 12277 12278 static void memslot_rmap_free(struct kvm_memory_slot *slot) 12279 { 12280 int i; 12281 12282 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 12283 kvfree(slot->arch.rmap[i]); 12284 slot->arch.rmap[i] = NULL; 12285 } 12286 } 12287 12288 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 12289 { 12290 int i; 12291 12292 memslot_rmap_free(slot); 12293 12294 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12295 kvfree(slot->arch.lpage_info[i - 1]); 12296 slot->arch.lpage_info[i - 1] = NULL; 12297 } 12298 12299 kvm_page_track_free_memslot(slot); 12300 } 12301 12302 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages) 12303 { 12304 const int sz = sizeof(*slot->arch.rmap[0]); 12305 int i; 12306 12307 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 12308 int level = i + 1; 12309 int lpages = __kvm_mmu_slot_lpages(slot, npages, level); 12310 12311 if (slot->arch.rmap[i]) 12312 continue; 12313 12314 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT); 12315 if (!slot->arch.rmap[i]) { 12316 memslot_rmap_free(slot); 12317 return -ENOMEM; 12318 } 12319 } 12320 12321 return 0; 12322 } 12323 12324 static int kvm_alloc_memslot_metadata(struct kvm *kvm, 12325 struct kvm_memory_slot *slot) 12326 { 12327 unsigned long npages = slot->npages; 12328 int i, r; 12329 12330 /* 12331 * Clear out the previous array pointers for the KVM_MR_MOVE case. 
The 12332 * old arrays will be freed by __kvm_set_memory_region() if installing 12333 * the new memslot is successful. 12334 */ 12335 memset(&slot->arch, 0, sizeof(slot->arch)); 12336 12337 if (kvm_memslots_have_rmaps(kvm)) { 12338 r = memslot_rmap_alloc(slot, npages); 12339 if (r) 12340 return r; 12341 } 12342 12343 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12344 struct kvm_lpage_info *linfo; 12345 unsigned long ugfn; 12346 int lpages; 12347 int level = i + 1; 12348 12349 lpages = __kvm_mmu_slot_lpages(slot, npages, level); 12350 12351 linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT); 12352 if (!linfo) 12353 goto out_free; 12354 12355 slot->arch.lpage_info[i - 1] = linfo; 12356 12357 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 12358 linfo[0].disallow_lpage = 1; 12359 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 12360 linfo[lpages - 1].disallow_lpage = 1; 12361 ugfn = slot->userspace_addr >> PAGE_SHIFT; 12362 /* 12363 * If the gfn and userspace address are not aligned wrt each 12364 * other, disable large page support for this slot. 12365 */ 12366 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { 12367 unsigned long j; 12368 12369 for (j = 0; j < lpages; ++j) 12370 linfo[j].disallow_lpage = 1; 12371 } 12372 } 12373 12374 if (kvm_page_track_create_memslot(kvm, slot, npages)) 12375 goto out_free; 12376 12377 return 0; 12378 12379 out_free: 12380 memslot_rmap_free(slot); 12381 12382 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 12383 kvfree(slot->arch.lpage_info[i - 1]); 12384 slot->arch.lpage_info[i - 1] = NULL; 12385 } 12386 return -ENOMEM; 12387 } 12388 12389 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) 12390 { 12391 struct kvm_vcpu *vcpu; 12392 unsigned long i; 12393 12394 /* 12395 * memslots->generation has been incremented. 12396 * mmio generation may have reached its maximum value. 12397 */ 12398 kvm_mmu_invalidate_mmio_sptes(kvm, gen); 12399 12400 /* Force re-initialization of steal_time cache */ 12401 kvm_for_each_vcpu(i, vcpu, kvm) 12402 kvm_vcpu_kick(vcpu); 12403 } 12404 12405 int kvm_arch_prepare_memory_region(struct kvm *kvm, 12406 const struct kvm_memory_slot *old, 12407 struct kvm_memory_slot *new, 12408 enum kvm_mr_change change) 12409 { 12410 if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) { 12411 if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn()) 12412 return -EINVAL; 12413 12414 return kvm_alloc_memslot_metadata(kvm, new); 12415 } 12416 12417 if (change == KVM_MR_FLAGS_ONLY) 12418 memcpy(&new->arch, &old->arch, sizeof(old->arch)); 12419 else if (WARN_ON_ONCE(change != KVM_MR_DELETE)) 12420 return -EIO; 12421 12422 return 0; 12423 } 12424 12425 12426 static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable) 12427 { 12428 struct kvm_arch *ka = &kvm->arch; 12429 12430 if (!kvm_x86_ops.cpu_dirty_log_size) 12431 return; 12432 12433 if ((enable && ++ka->cpu_dirty_logging_count == 1) || 12434 (!enable && --ka->cpu_dirty_logging_count == 0)) 12435 kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING); 12436 12437 WARN_ON_ONCE(ka->cpu_dirty_logging_count < 0); 12438 } 12439 12440 static void kvm_mmu_slot_apply_flags(struct kvm *kvm, 12441 struct kvm_memory_slot *old, 12442 const struct kvm_memory_slot *new, 12443 enum kvm_mr_change change) 12444 { 12445 u32 old_flags = old ? old->flags : 0; 12446 u32 new_flags = new ? 
new->flags : 0; 12447 bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES; 12448 12449 /* 12450 * Update CPU dirty logging if dirty logging is being toggled. This 12451 * applies to all operations. 12452 */ 12453 if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) 12454 kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages); 12455 12456 /* 12457 * Nothing more to do for RO slots (which can't be dirtied and can't be 12458 * made writable) or CREATE/MOVE/DELETE of a slot. 12459 * 12460 * For a memslot with dirty logging disabled: 12461 * CREATE: No dirty mappings will already exist. 12462 * MOVE/DELETE: The old mappings will already have been cleaned up by 12463 * kvm_arch_flush_shadow_memslot() 12464 * 12465 * For a memslot with dirty logging enabled: 12466 * CREATE: No shadow pages exist, thus nothing to write-protect 12467 * and no dirty bits to clear. 12468 * MOVE/DELETE: The old mappings will already have been cleaned up by 12469 * kvm_arch_flush_shadow_memslot(). 12470 */ 12471 if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY)) 12472 return; 12473 12474 /* 12475 * READONLY and non-flags changes were filtered out above, and the only 12476 * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty 12477 * logging isn't being toggled on or off. 12478 */ 12479 if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES))) 12480 return; 12481 12482 if (!log_dirty_pages) { 12483 /* 12484 * Dirty logging tracks sptes in 4k granularity, meaning that 12485 * large sptes have to be split. If live migration succeeds, 12486 * the guest in the source machine will be destroyed and large 12487 * sptes will be created in the destination. However, if the 12488 * guest continues to run in the source machine (for example if 12489 * live migration fails), small sptes will remain around and 12490 * cause bad performance. 12491 * 12492 * Scan sptes if dirty logging has been stopped, dropping those 12493 * which can be collapsed into a single large-page spte. Later 12494 * page faults will create the large-page sptes. 12495 */ 12496 kvm_mmu_zap_collapsible_sptes(kvm, new); 12497 } else { 12498 /* 12499 * Initially-all-set does not require write protecting any page, 12500 * because they're all assumed to be dirty. 12501 */ 12502 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 12503 return; 12504 12505 if (READ_ONCE(eager_page_split)) 12506 kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K); 12507 12508 if (kvm_x86_ops.cpu_dirty_log_size) { 12509 kvm_mmu_slot_leaf_clear_dirty(kvm, new); 12510 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M); 12511 } else { 12512 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K); 12513 } 12514 12515 /* 12516 * Unconditionally flush the TLBs after enabling dirty logging. 12517 * A flush is almost always going to be necessary (see below), 12518 * and unconditionally flushing allows the helpers to omit 12519 * the subtly complex checks when removing write access. 12520 * 12521 * Do the flush outside of mmu_lock to reduce the amount of 12522 * time mmu_lock is held. Flushing after dropping mmu_lock is 12523 * safe as KVM only needs to guarantee the slot is fully 12524 * write-protected before returning to userspace, i.e. before 12525 * userspace can consume the dirty status. 12526 * 12527 * Flushing outside of mmu_lock requires KVM to be careful when 12528 * making decisions based on writable status of an SPTE, e.g. a 12529 * !writable SPTE doesn't guarantee a CPU can't perform writes. 
12530 * 12531 * Specifically, KVM also write-protects guest page tables to 12532 * monitor changes when using shadow paging, and must guarantee 12533 * no CPUs can write to those page before mmu_lock is dropped. 12534 * Because CPUs may have stale TLB entries at this point, a 12535 * !writable SPTE doesn't guarantee CPUs can't perform writes. 12536 * 12537 * KVM also allows making SPTES writable outside of mmu_lock, 12538 * e.g. to allow dirty logging without taking mmu_lock. 12539 * 12540 * To handle these scenarios, KVM uses a separate software-only 12541 * bit (MMU-writable) to track if a SPTE is !writable due to 12542 * a guest page table being write-protected (KVM clears the 12543 * MMU-writable flag when write-protecting for shadow paging). 12544 * 12545 * The use of MMU-writable is also the primary motivation for 12546 * the unconditional flush. Because KVM must guarantee that a 12547 * CPU doesn't contain stale, writable TLB entries for a 12548 * !MMU-writable SPTE, KVM must flush if it encounters any 12549 * MMU-writable SPTE regardless of whether the actual hardware 12550 * writable bit was set. I.e. KVM is almost guaranteed to need 12551 * to flush, while unconditionally flushing allows the "remove 12552 * write access" helpers to ignore MMU-writable entirely. 12553 * 12554 * See is_writable_pte() for more details (the case involving 12555 * access-tracked SPTEs is particularly relevant). 12556 */ 12557 kvm_arch_flush_remote_tlbs_memslot(kvm, new); 12558 } 12559 } 12560 12561 void kvm_arch_commit_memory_region(struct kvm *kvm, 12562 struct kvm_memory_slot *old, 12563 const struct kvm_memory_slot *new, 12564 enum kvm_mr_change change) 12565 { 12566 if (!kvm->arch.n_requested_mmu_pages && 12567 (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) { 12568 unsigned long nr_mmu_pages; 12569 12570 nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO; 12571 nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); 12572 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); 12573 } 12574 12575 kvm_mmu_slot_apply_flags(kvm, old, new, change); 12576 12577 /* Free the arrays associated with the old memslot. 
*/ 12578 if (change == KVM_MR_MOVE) 12579 kvm_arch_free_memslot(kvm, old); 12580 } 12581 12582 void kvm_arch_flush_shadow_all(struct kvm *kvm) 12583 { 12584 kvm_mmu_zap_all(kvm); 12585 } 12586 12587 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 12588 struct kvm_memory_slot *slot) 12589 { 12590 kvm_page_track_flush_slot(kvm, slot); 12591 } 12592 12593 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) 12594 { 12595 return (is_guest_mode(vcpu) && 12596 static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); 12597 } 12598 12599 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) 12600 { 12601 if (!list_empty_careful(&vcpu->async_pf.done)) 12602 return true; 12603 12604 if (kvm_apic_has_events(vcpu)) 12605 return true; 12606 12607 if (vcpu->arch.pv.pv_unhalted) 12608 return true; 12609 12610 if (vcpu->arch.exception.pending) 12611 return true; 12612 12613 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 12614 (vcpu->arch.nmi_pending && 12615 static_call(kvm_x86_nmi_allowed)(vcpu, false))) 12616 return true; 12617 12618 if (kvm_test_request(KVM_REQ_SMI, vcpu) || 12619 (vcpu->arch.smi_pending && 12620 static_call(kvm_x86_smi_allowed)(vcpu, false))) 12621 return true; 12622 12623 if (kvm_arch_interrupt_allowed(vcpu) && 12624 (kvm_cpu_has_interrupt(vcpu) || 12625 kvm_guest_apic_has_interrupt(vcpu))) 12626 return true; 12627 12628 if (kvm_hv_has_stimer_pending(vcpu)) 12629 return true; 12630 12631 if (is_guest_mode(vcpu) && 12632 kvm_x86_ops.nested_ops->hv_timer_pending && 12633 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) 12634 return true; 12635 12636 if (kvm_xen_has_pending_events(vcpu)) 12637 return true; 12638 12639 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) 12640 return true; 12641 12642 return false; 12643 } 12644 12645 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 12646 { 12647 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); 12648 } 12649 12650 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 12651 { 12652 if (kvm_vcpu_apicv_active(vcpu) && 12653 static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) 12654 return true; 12655 12656 return false; 12657 } 12658 12659 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 12660 { 12661 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) 12662 return true; 12663 12664 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 12665 kvm_test_request(KVM_REQ_SMI, vcpu) || 12666 kvm_test_request(KVM_REQ_EVENT, vcpu)) 12667 return true; 12668 12669 return kvm_arch_dy_has_pending_interrupt(vcpu); 12670 } 12671 12672 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) 12673 { 12674 if (vcpu->arch.guest_state_protected) 12675 return true; 12676 12677 return vcpu->arch.preempted_in_kernel; 12678 } 12679 12680 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) 12681 { 12682 return kvm_rip_read(vcpu); 12683 } 12684 12685 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 12686 { 12687 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; 12688 } 12689 12690 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) 12691 { 12692 return static_call(kvm_x86_interrupt_allowed)(vcpu, false); 12693 } 12694 12695 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) 12696 { 12697 /* Can't read the RIP when guest state is protected, just return 0 */ 12698 if (vcpu->arch.guest_state_protected) 12699 return 0; 12700 12701 if (is_64_bit_mode(vcpu)) 12702 return kvm_rip_read(vcpu); 12703 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + 12704 kvm_rip_read(vcpu)); 12705 } 12706 EXPORT_SYMBOL_GPL(kvm_get_linear_rip); 
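/*
 * Illustrative sketch, not part of KVM: the linear RIP computed above is what
 * kvm_arch_vcpu_ioctl_set_guest_debug() records in singlestep_rip when
 * userspace enables single-stepping, e.g. via something like
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * (vcpu_fd is a hypothetical vCPU file descriptor.)  __kvm_set_rflags() then
 * forces X86_EFLAGS_TF only while the vCPU is still at that recorded linear
 * RIP, using kvm_is_linear_rip() below.
 */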
12707 12708 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) 12709 { 12710 return kvm_get_linear_rip(vcpu) == linear_rip; 12711 } 12712 EXPORT_SYMBOL_GPL(kvm_is_linear_rip); 12713 12714 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) 12715 { 12716 unsigned long rflags; 12717 12718 rflags = static_call(kvm_x86_get_rflags)(vcpu); 12719 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 12720 rflags &= ~X86_EFLAGS_TF; 12721 return rflags; 12722 } 12723 EXPORT_SYMBOL_GPL(kvm_get_rflags); 12724 12725 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 12726 { 12727 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && 12728 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) 12729 rflags |= X86_EFLAGS_TF; 12730 static_call(kvm_x86_set_rflags)(vcpu, rflags); 12731 } 12732 12733 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 12734 { 12735 __kvm_set_rflags(vcpu, rflags); 12736 kvm_make_request(KVM_REQ_EVENT, vcpu); 12737 } 12738 EXPORT_SYMBOL_GPL(kvm_set_rflags); 12739 12740 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) 12741 { 12742 BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU)); 12743 12744 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); 12745 } 12746 12747 static inline u32 kvm_async_pf_next_probe(u32 key) 12748 { 12749 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); 12750 } 12751 12752 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12753 { 12754 u32 key = kvm_async_pf_hash_fn(gfn); 12755 12756 while (vcpu->arch.apf.gfns[key] != ~0) 12757 key = kvm_async_pf_next_probe(key); 12758 12759 vcpu->arch.apf.gfns[key] = gfn; 12760 } 12761 12762 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) 12763 { 12764 int i; 12765 u32 key = kvm_async_pf_hash_fn(gfn); 12766 12767 for (i = 0; i < ASYNC_PF_PER_VCPU && 12768 (vcpu->arch.apf.gfns[key] != gfn && 12769 vcpu->arch.apf.gfns[key] != ~0); i++) 12770 key = kvm_async_pf_next_probe(key); 12771 12772 return key; 12773 } 12774 12775 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12776 { 12777 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; 12778 } 12779 12780 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 12781 { 12782 u32 i, j, k; 12783 12784 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); 12785 12786 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) 12787 return; 12788 12789 while (true) { 12790 vcpu->arch.apf.gfns[i] = ~0; 12791 do { 12792 j = kvm_async_pf_next_probe(j); 12793 if (vcpu->arch.apf.gfns[j] == ~0) 12794 return; 12795 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); 12796 /* 12797 * k lies cyclically in ]i,j] 12798 * | i.k.j | 12799 * |....j i.k.| or |.k..j i...| 12800 */ 12801 } while ((i <= j) ? 
(i < k && k <= j) : (i < k || k <= j)); 12802 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; 12803 i = j; 12804 } 12805 } 12806 12807 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu) 12808 { 12809 u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT; 12810 12811 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, 12812 sizeof(reason)); 12813 } 12814 12815 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token) 12816 { 12817 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token); 12818 12819 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, 12820 &token, offset, sizeof(token)); 12821 } 12822 12823 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu) 12824 { 12825 unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token); 12826 u32 val; 12827 12828 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, 12829 &val, offset, sizeof(val))) 12830 return false; 12831 12832 return !val; 12833 } 12834 12835 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) 12836 { 12837 12838 if (!kvm_pv_async_pf_enabled(vcpu)) 12839 return false; 12840 12841 if (vcpu->arch.apf.send_user_only && 12842 static_call(kvm_x86_get_cpl)(vcpu) == 0) 12843 return false; 12844 12845 if (is_guest_mode(vcpu)) { 12846 /* 12847 * L1 needs to opt into the special #PF vmexits that are 12848 * used to deliver async page faults. 12849 */ 12850 return vcpu->arch.apf.delivery_as_pf_vmexit; 12851 } else { 12852 /* 12853 * Play it safe in case the guest temporarily disables paging. 12854 * The real mode IDT in particular is unlikely to have a #PF 12855 * exception setup. 12856 */ 12857 return is_paging(vcpu); 12858 } 12859 } 12860 12861 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) 12862 { 12863 if (unlikely(!lapic_in_kernel(vcpu) || 12864 kvm_event_needs_reinjection(vcpu) || 12865 vcpu->arch.exception.pending)) 12866 return false; 12867 12868 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) 12869 return false; 12870 12871 /* 12872 * If interrupts are off we cannot even use an artificial 12873 * halt state. 12874 */ 12875 return kvm_arch_interrupt_allowed(vcpu); 12876 } 12877 12878 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 12879 struct kvm_async_pf *work) 12880 { 12881 struct x86_exception fault; 12882 12883 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); 12884 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); 12885 12886 if (kvm_can_deliver_async_pf(vcpu) && 12887 !apf_put_user_notpresent(vcpu)) { 12888 fault.vector = PF_VECTOR; 12889 fault.error_code_valid = true; 12890 fault.error_code = 0; 12891 fault.nested_page_fault = false; 12892 fault.address = work->arch.token; 12893 fault.async_page_fault = true; 12894 kvm_inject_page_fault(vcpu, &fault); 12895 return true; 12896 } else { 12897 /* 12898 * It is not possible to deliver a paravirtualized asynchronous 12899 * page fault, but putting the guest in an artificial halt state 12900 * can be beneficial nevertheless: if an interrupt arrives, we 12901 * can deliver it timely and perhaps the guest will schedule 12902 * another process. When the instruction that triggered a page 12903 * fault is retried, hopefully the page will be ready in the host. 
12904 */ 12905 kvm_make_request(KVM_REQ_APF_HALT, vcpu); 12906 return false; 12907 } 12908 } 12909 12910 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 12911 struct kvm_async_pf *work) 12912 { 12913 struct kvm_lapic_irq irq = { 12914 .delivery_mode = APIC_DM_FIXED, 12915 .vector = vcpu->arch.apf.vec 12916 }; 12917 12918 if (work->wakeup_all) 12919 work->arch.token = ~0; /* broadcast wakeup */ 12920 else 12921 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); 12922 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); 12923 12924 if ((work->wakeup_all || work->notpresent_injected) && 12925 kvm_pv_async_pf_enabled(vcpu) && 12926 !apf_put_user_ready(vcpu, work->arch.token)) { 12927 vcpu->arch.apf.pageready_pending = true; 12928 kvm_apic_set_irq(vcpu, &irq, NULL); 12929 } 12930 12931 vcpu->arch.apf.halted = false; 12932 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 12933 } 12934 12935 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) 12936 { 12937 kvm_make_request(KVM_REQ_APF_READY, vcpu); 12938 if (!vcpu->arch.apf.pageready_pending) 12939 kvm_vcpu_kick(vcpu); 12940 } 12941 12942 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) 12943 { 12944 if (!kvm_pv_async_pf_enabled(vcpu)) 12945 return true; 12946 else 12947 return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); 12948 } 12949 12950 void kvm_arch_start_assignment(struct kvm *kvm) 12951 { 12952 if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1) 12953 static_call_cond(kvm_x86_pi_start_assignment)(kvm); 12954 } 12955 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment); 12956 12957 void kvm_arch_end_assignment(struct kvm *kvm) 12958 { 12959 atomic_dec(&kvm->arch.assigned_device_count); 12960 } 12961 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment); 12962 12963 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm) 12964 { 12965 return arch_atomic_read(&kvm->arch.assigned_device_count); 12966 } 12967 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); 12968 12969 void kvm_arch_register_noncoherent_dma(struct kvm *kvm) 12970 { 12971 atomic_inc(&kvm->arch.noncoherent_dma_count); 12972 } 12973 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma); 12974 12975 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) 12976 { 12977 atomic_dec(&kvm->arch.noncoherent_dma_count); 12978 } 12979 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma); 12980 12981 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) 12982 { 12983 return atomic_read(&kvm->arch.noncoherent_dma_count); 12984 } 12985 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma); 12986 12987 bool kvm_arch_has_irq_bypass(void) 12988 { 12989 return true; 12990 } 12991 12992 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, 12993 struct irq_bypass_producer *prod) 12994 { 12995 struct kvm_kernel_irqfd *irqfd = 12996 container_of(cons, struct kvm_kernel_irqfd, consumer); 12997 int ret; 12998 12999 irqfd->producer = prod; 13000 kvm_arch_start_assignment(irqfd->kvm); 13001 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, 13002 prod->irq, irqfd->gsi, 1); 13003 13004 if (ret) 13005 kvm_arch_end_assignment(irqfd->kvm); 13006 13007 return ret; 13008 } 13009 13010 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, 13011 struct irq_bypass_producer *prod) 13012 { 13013 int ret; 13014 struct kvm_kernel_irqfd *irqfd = 13015 container_of(cons, struct kvm_kernel_irqfd, consumer); 13016 13017 WARN_ON(irqfd->producer != prod); 13018 irqfd->producer = NULL; 13019 13020 /* 13021 * When producer of consumer is 
unregistered, we change back to 13022 * remapped mode, so we can re-use the current implementation 13023 * when the irq is masked/disabled or the consumer side (KVM 13024 * in this case) doesn't want to receive the interrupts. 13025 */ 13026 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); 13027 if (ret) 13028 printk(KERN_INFO "irq bypass consumer (token %p) unregistration" 13029 " fails: %d\n", irqfd->consumer.token, ret); 13030 13031 kvm_arch_end_assignment(irqfd->kvm); 13032 } 13033 13034 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, 13035 uint32_t guest_irq, bool set) 13036 { 13037 return static_call(kvm_x86_pi_update_irte)(kvm, host_irq, guest_irq, set); 13038 } 13039 13040 bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, 13041 struct kvm_kernel_irq_routing_entry *new) 13042 { 13043 if (new->type != KVM_IRQ_ROUTING_MSI) 13044 return true; 13045 13046 return !!memcmp(&old->msi, &new->msi, sizeof(new->msi)); 13047 } 13048 13049 bool kvm_vector_hashing_enabled(void) 13050 { 13051 return vector_hashing; 13052 } 13053 13054 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) 13055 { 13056 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; 13057 } 13058 EXPORT_SYMBOL_GPL(kvm_arch_no_poll); 13059 13060 13061 int kvm_spec_ctrl_test_value(u64 value) 13062 { 13063 /* 13064 * Test that setting IA32_SPEC_CTRL to the given value 13065 * is allowed by the host processor. 13066 */ 13067 13068 u64 saved_value; 13069 unsigned long flags; 13070 int ret = 0; 13071 13072 local_irq_save(flags); 13073 13074 if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value)) 13075 ret = 1; 13076 else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value)) 13077 ret = 1; 13078 else 13079 wrmsrl(MSR_IA32_SPEC_CTRL, saved_value); 13080 13081 local_irq_restore(flags); 13082 13083 return ret; 13084 } 13085 EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value); 13086 13087 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) 13088 { 13089 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 13090 struct x86_exception fault; 13091 u64 access = error_code & 13092 (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK); 13093 13094 if (!(error_code & PFERR_PRESENT_MASK) || 13095 mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) { 13096 /* 13097 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page 13098 * tables probably do not match the TLB. Just proceed 13099 * with the error code that the processor gave. 13100 */ 13101 fault.vector = PF_VECTOR; 13102 fault.error_code_valid = true; 13103 fault.error_code = error_code; 13104 fault.nested_page_fault = false; 13105 fault.address = gva; 13106 fault.async_page_fault = false; 13107 } 13108 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); 13109 } 13110 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error); 13111 13112 /* 13113 * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns 13114 * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value 13115 * indicates whether exit to userspace is needed.
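 *
 * A typical caller pattern, mirroring kvm_handle_invpcid() below:
 *
 *	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 *	if (r != X86EMUL_CONTINUE)
 *		return kvm_handle_memory_failure(vcpu, r, &e);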
13116 */ 13117 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, 13118 struct x86_exception *e) 13119 { 13120 if (r == X86EMUL_PROPAGATE_FAULT) { 13121 kvm_inject_emulated_page_fault(vcpu, e); 13122 return 1; 13123 } 13124 13125 /* 13126 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED 13127 * while handling a VMX instruction KVM could've handled the request 13128 * correctly by exiting to userspace and performing I/O but there 13129 * doesn't seem to be a real use-case behind such requests, just return 13130 * KVM_EXIT_INTERNAL_ERROR for now. 13131 */ 13132 kvm_prepare_emulation_failure_exit(vcpu); 13133 13134 return 0; 13135 } 13136 EXPORT_SYMBOL_GPL(kvm_handle_memory_failure); 13137 13138 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) 13139 { 13140 bool pcid_enabled; 13141 struct x86_exception e; 13142 struct { 13143 u64 pcid; 13144 u64 gla; 13145 } operand; 13146 int r; 13147 13148 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 13149 if (r != X86EMUL_CONTINUE) 13150 return kvm_handle_memory_failure(vcpu, r, &e); 13151 13152 if (operand.pcid >> 12 != 0) { 13153 kvm_inject_gp(vcpu, 0); 13154 return 1; 13155 } 13156 13157 pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); 13158 13159 switch (type) { 13160 case INVPCID_TYPE_INDIV_ADDR: 13161 if ((!pcid_enabled && (operand.pcid != 0)) || 13162 is_noncanonical_address(operand.gla, vcpu)) { 13163 kvm_inject_gp(vcpu, 0); 13164 return 1; 13165 } 13166 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); 13167 return kvm_skip_emulated_instruction(vcpu); 13168 13169 case INVPCID_TYPE_SINGLE_CTXT: 13170 if (!pcid_enabled && (operand.pcid != 0)) { 13171 kvm_inject_gp(vcpu, 0); 13172 return 1; 13173 } 13174 13175 kvm_invalidate_pcid(vcpu, operand.pcid); 13176 return kvm_skip_emulated_instruction(vcpu); 13177 13178 case INVPCID_TYPE_ALL_NON_GLOBAL: 13179 /* 13180 * Currently, KVM doesn't mark global entries in the shadow 13181 * page tables, so a non-global flush just degenerates to a 13182 * global flush. If needed, we could optimize this later by 13183 * keeping track of global entries in shadow page tables. 13184 */ 13185 13186 fallthrough; 13187 case INVPCID_TYPE_ALL_INCL_GLOBAL: 13188 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 13189 return kvm_skip_emulated_instruction(vcpu); 13190 13191 default: 13192 kvm_inject_gp(vcpu, 0); 13193 return 1; 13194 } 13195 } 13196 EXPORT_SYMBOL_GPL(kvm_handle_invpcid); 13197 13198 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu) 13199 { 13200 struct kvm_run *run = vcpu->run; 13201 struct kvm_mmio_fragment *frag; 13202 unsigned int len; 13203 13204 BUG_ON(!vcpu->mmio_needed); 13205 13206 /* Complete previous fragment */ 13207 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 13208 len = min(8u, frag->len); 13209 if (!vcpu->mmio_is_write) 13210 memcpy(frag->data, run->mmio.data, len); 13211 13212 if (frag->len <= 8) { 13213 /* Switch to the next fragment. */ 13214 frag++; 13215 vcpu->mmio_cur_fragment++; 13216 } else { 13217 /* Go forward to the next mmio piece. 
*/ 13218 frag->data += len; 13219 frag->gpa += len; 13220 frag->len -= len; 13221 } 13222 13223 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 13224 vcpu->mmio_needed = 0; 13225 13226 // VMG change, at this point, we're always done 13227 // RIP has already been advanced 13228 return 1; 13229 } 13230 13231 // More MMIO is needed 13232 run->mmio.phys_addr = frag->gpa; 13233 run->mmio.len = min(8u, frag->len); 13234 run->mmio.is_write = vcpu->mmio_is_write; 13235 if (run->mmio.is_write) 13236 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 13237 run->exit_reason = KVM_EXIT_MMIO; 13238 13239 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13240 13241 return 0; 13242 } 13243 13244 int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, 13245 void *data) 13246 { 13247 int handled; 13248 struct kvm_mmio_fragment *frag; 13249 13250 if (!data) 13251 return -EINVAL; 13252 13253 handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data); 13254 if (handled == bytes) 13255 return 1; 13256 13257 bytes -= handled; 13258 gpa += handled; 13259 data += handled; 13260 13261 /*TODO: Check if need to increment number of frags */ 13262 frag = vcpu->mmio_fragments; 13263 vcpu->mmio_nr_fragments = 1; 13264 frag->len = bytes; 13265 frag->gpa = gpa; 13266 frag->data = data; 13267 13268 vcpu->mmio_needed = 1; 13269 vcpu->mmio_cur_fragment = 0; 13270 13271 vcpu->run->mmio.phys_addr = gpa; 13272 vcpu->run->mmio.len = min(8u, frag->len); 13273 vcpu->run->mmio.is_write = 1; 13274 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 13275 vcpu->run->exit_reason = KVM_EXIT_MMIO; 13276 13277 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13278 13279 return 0; 13280 } 13281 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write); 13282 13283 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, 13284 void *data) 13285 { 13286 int handled; 13287 struct kvm_mmio_fragment *frag; 13288 13289 if (!data) 13290 return -EINVAL; 13291 13292 handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data); 13293 if (handled == bytes) 13294 return 1; 13295 13296 bytes -= handled; 13297 gpa += handled; 13298 data += handled; 13299 13300 /*TODO: Check if need to increment number of frags */ 13301 frag = vcpu->mmio_fragments; 13302 vcpu->mmio_nr_fragments = 1; 13303 frag->len = bytes; 13304 frag->gpa = gpa; 13305 frag->data = data; 13306 13307 vcpu->mmio_needed = 1; 13308 vcpu->mmio_cur_fragment = 0; 13309 13310 vcpu->run->mmio.phys_addr = gpa; 13311 vcpu->run->mmio.len = min(8u, frag->len); 13312 vcpu->run->mmio.is_write = 0; 13313 vcpu->run->exit_reason = KVM_EXIT_MMIO; 13314 13315 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; 13316 13317 return 0; 13318 } 13319 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read); 13320 13321 static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size) 13322 { 13323 vcpu->arch.sev_pio_count -= count; 13324 vcpu->arch.sev_pio_data += count * size; 13325 } 13326 13327 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, 13328 unsigned int port); 13329 13330 static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu) 13331 { 13332 int size = vcpu->arch.pio.size; 13333 int port = vcpu->arch.pio.port; 13334 13335 vcpu->arch.pio.count = 0; 13336 if (vcpu->arch.sev_pio_count) 13337 return kvm_sev_es_outs(vcpu, size, port); 13338 return 1; 13339 } 13340 13341 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, 13342 unsigned int port) 
static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
{
	vcpu->arch.sev_pio_count -= count;
	vcpu->arch.sev_pio_data += count * size;
}

static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
			   unsigned int port);

static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
{
	int size = vcpu->arch.pio.size;
	int port = vcpu->arch.pio.port;

	vcpu->arch.pio.count = 0;
	if (vcpu->arch.sev_pio_count)
		return kvm_sev_es_outs(vcpu, size, port);
	return 1;
}

static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
			   unsigned int port)
{
	for (;;) {
		unsigned int count =
			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
		int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);

		/* memcpy done already by emulator_pio_out. */
		advance_sev_es_emulated_pio(vcpu, count, size);
		if (!ret)
			break;

		/* Emulation done by the kernel. */
		if (!vcpu->arch.sev_pio_count)
			return 1;
	}

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
	return 0;
}

static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
			  unsigned int port);

static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
{
	unsigned count = vcpu->arch.pio.count;
	int size = vcpu->arch.pio.size;
	int port = vcpu->arch.pio.port;

	complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
	advance_sev_es_emulated_pio(vcpu, count, size);
	if (vcpu->arch.sev_pio_count)
		return kvm_sev_es_ins(vcpu, size, port);
	return 1;
}

static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
			  unsigned int port)
{
	for (;;) {
		unsigned int count =
			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
		if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count))
			break;

		/* Emulation done by the kernel. */
		advance_sev_es_emulated_pio(vcpu, count, size);
		if (!vcpu->arch.sev_pio_count)
			return 1;
	}

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
	return 0;
}
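
/*
 * The loops above consume the string I/O buffer in chunks of at most
 * PAGE_SIZE / size items per emulator_pio_{in,out}() call.  As a worked
 * example (assuming the usual 4 KiB PAGE_SIZE): an OUTS of 3000 16-bit words
 * is handled as a first chunk of 4096 / 2 = 2048 words, which leaves
 * sev_pio_count == 952; the remaining 952 words are emitted on the next loop
 * iteration, or from complete_sev_es_emulated_outs() if the first chunk had
 * to exit to userspace.
 */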
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in)
{
	vcpu->arch.sev_pio_data = data;
	vcpu->arch.sev_pio_count = count;
	return in ? kvm_sev_es_ins(vcpu, size, port)
		  : kvm_sev_es_outs(vcpu, size, port);
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);

static int __init kvm_x86_init(void)
{
	kvm_mmu_x86_module_init();
	return 0;
}
module_init(kvm_x86_init);

static void __exit kvm_x86_exit(void)
{
	/*
	 * If module_init() is implemented, module_exit() must also be
	 * implemented to allow module unload.
	 */
}
module_exit(kvm_x86_exit);
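
/*
 * As the comment in kvm_x86_exit() notes, providing module_init() without a
 * matching module_exit() would leave the module impossible to unload.  The
 * empty exit handler above is therefore the minimal form of the usual
 * pairing, sketched here for reference (the names are illustrative, not part
 * of this file):
 *
 *	static int __init example_init(void)
 *	{
 *		return 0;	// a negative errno here fails the module load
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *	}
 *	module_exit(example_exit);
 */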