1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * derived from drivers/kvm/kvm_main.c 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright (C) 2008 Qumranet, Inc. 9 * Copyright IBM Corporation, 2008 10 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 11 * 12 * Authors: 13 * Avi Kivity <avi@qumranet.com> 14 * Yaniv Kamay <yaniv@qumranet.com> 15 * Amit Shah <amit.shah@qumranet.com> 16 * Ben-Ami Yassour <benami@il.ibm.com> 17 */ 18 19 #include <linux/kvm_host.h> 20 #include "irq.h" 21 #include "ioapic.h" 22 #include "mmu.h" 23 #include "i8254.h" 24 #include "tss.h" 25 #include "kvm_cache_regs.h" 26 #include "kvm_emulate.h" 27 #include "x86.h" 28 #include "cpuid.h" 29 #include "pmu.h" 30 #include "hyperv.h" 31 #include "lapic.h" 32 #include "xen.h" 33 34 #include <linux/clocksource.h> 35 #include <linux/interrupt.h> 36 #include <linux/kvm.h> 37 #include <linux/fs.h> 38 #include <linux/vmalloc.h> 39 #include <linux/export.h> 40 #include <linux/moduleparam.h> 41 #include <linux/mman.h> 42 #include <linux/highmem.h> 43 #include <linux/iommu.h> 44 #include <linux/intel-iommu.h> 45 #include <linux/cpufreq.h> 46 #include <linux/user-return-notifier.h> 47 #include <linux/srcu.h> 48 #include <linux/slab.h> 49 #include <linux/perf_event.h> 50 #include <linux/uaccess.h> 51 #include <linux/hash.h> 52 #include <linux/pci.h> 53 #include <linux/timekeeper_internal.h> 54 #include <linux/pvclock_gtod.h> 55 #include <linux/kvm_irqfd.h> 56 #include <linux/irqbypass.h> 57 #include <linux/sched/stat.h> 58 #include <linux/sched/isolation.h> 59 #include <linux/mem_encrypt.h> 60 #include <linux/entry-kvm.h> 61 #include <linux/suspend.h> 62 63 #include <trace/events/kvm.h> 64 65 #include <asm/debugreg.h> 66 #include <asm/msr.h> 67 #include <asm/desc.h> 68 #include <asm/mce.h> 69 #include <asm/pkru.h> 70 #include <linux/kernel_stat.h> 71 #include <asm/fpu/internal.h> /* Ugh! 
*/ 72 #include <asm/pvclock.h> 73 #include <asm/div64.h> 74 #include <asm/irq_remapping.h> 75 #include <asm/mshyperv.h> 76 #include <asm/hypervisor.h> 77 #include <asm/tlbflush.h> 78 #include <asm/intel_pt.h> 79 #include <asm/emulate_prefix.h> 80 #include <asm/sgx.h> 81 #include <clocksource/hyperv_timer.h> 82 83 #define CREATE_TRACE_POINTS 84 #include "trace.h" 85 86 #define MAX_IO_MSRS 256 87 #define KVM_MAX_MCE_BANKS 32 88 u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P; 89 EXPORT_SYMBOL_GPL(kvm_mce_cap_supported); 90 91 #define emul_to_vcpu(ctxt) \ 92 ((struct kvm_vcpu *)(ctxt)->vcpu) 93 94 /* EFER defaults: 95 * - enable syscall per default because its emulated by KVM 96 * - enable LME and LMA per default on 64 bit KVM 97 */ 98 #ifdef CONFIG_X86_64 99 static 100 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); 101 #else 102 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); 103 #endif 104 105 static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS; 106 107 #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE) 108 109 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \ 110 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) 111 112 static void update_cr8_intercept(struct kvm_vcpu *vcpu); 113 static void process_nmi(struct kvm_vcpu *vcpu); 114 static void process_smi(struct kvm_vcpu *vcpu); 115 static void enter_smm(struct kvm_vcpu *vcpu); 116 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); 117 static void store_regs(struct kvm_vcpu *vcpu); 118 static int sync_regs(struct kvm_vcpu *vcpu); 119 120 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2); 121 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2); 122 123 struct kvm_x86_ops kvm_x86_ops __read_mostly; 124 EXPORT_SYMBOL_GPL(kvm_x86_ops); 125 126 #define KVM_X86_OP(func) \ 127 DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \ 128 *(((struct kvm_x86_ops *)0)->func)); 129 #define KVM_X86_OP_NULL KVM_X86_OP 130 #include <asm/kvm-x86-ops.h> 131 EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits); 132 EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg); 133 EXPORT_STATIC_CALL_GPL(kvm_x86_tlb_flush_current); 134 135 static bool __read_mostly ignore_msrs = 0; 136 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); 137 138 bool __read_mostly report_ignored_msrs = true; 139 module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR); 140 EXPORT_SYMBOL_GPL(report_ignored_msrs); 141 142 unsigned int min_timer_period_us = 200; 143 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); 144 145 static bool __read_mostly kvmclock_periodic_sync = true; 146 module_param(kvmclock_periodic_sync, bool, S_IRUGO); 147 148 bool __read_mostly kvm_has_tsc_control; 149 EXPORT_SYMBOL_GPL(kvm_has_tsc_control); 150 u32 __read_mostly kvm_max_guest_tsc_khz; 151 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); 152 u8 __read_mostly kvm_tsc_scaling_ratio_frac_bits; 153 EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits); 154 u64 __read_mostly kvm_max_tsc_scaling_ratio; 155 EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio); 156 u64 __read_mostly kvm_default_tsc_scaling_ratio; 157 EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio); 158 bool __read_mostly kvm_has_bus_lock_exit; 159 EXPORT_SYMBOL_GPL(kvm_has_bus_lock_exit); 160 161 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ 162 static u32 __read_mostly tsc_tolerance_ppm = 250; 163 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); 164 165 /* 166 * lapic 
timer advance (tscdeadline mode only) in nanoseconds. '-1' enables 167 * adaptive tuning starting from default advancement of 1000ns. '0' disables 168 * advancement entirely. Any other value is used as-is and disables adaptive 169 * tuning, i.e. allows privileged userspace to set an exact advancement time. 170 */ 171 static int __read_mostly lapic_timer_advance_ns = -1; 172 module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR); 173 174 static bool __read_mostly vector_hashing = true; 175 module_param(vector_hashing, bool, S_IRUGO); 176 177 bool __read_mostly enable_vmware_backdoor = false; 178 module_param(enable_vmware_backdoor, bool, S_IRUGO); 179 EXPORT_SYMBOL_GPL(enable_vmware_backdoor); 180 181 static bool __read_mostly force_emulation_prefix = false; 182 module_param(force_emulation_prefix, bool, S_IRUGO); 183 184 int __read_mostly pi_inject_timer = -1; 185 module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR); 186 187 /* 188 * Restoring the host value for MSRs that are only consumed when running in 189 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU 190 * returns to userspace, i.e. the kernel can run with the guest's value. 191 */ 192 #define KVM_MAX_NR_USER_RETURN_MSRS 16 193 194 struct kvm_user_return_msrs { 195 struct user_return_notifier urn; 196 bool registered; 197 struct kvm_user_return_msr_values { 198 u64 host; 199 u64 curr; 200 } values[KVM_MAX_NR_USER_RETURN_MSRS]; 201 }; 202 203 u32 __read_mostly kvm_nr_uret_msrs; 204 EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs); 205 static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS]; 206 static struct kvm_user_return_msrs __percpu *user_return_msrs; 207 208 #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ 209 | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ 210 | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \ 211 | XFEATURE_MASK_PKRU) 212 213 u64 __read_mostly host_efer; 214 EXPORT_SYMBOL_GPL(host_efer); 215 216 bool __read_mostly allow_smaller_maxphyaddr = 0; 217 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr); 218 219 bool __read_mostly enable_apicv = true; 220 EXPORT_SYMBOL_GPL(enable_apicv); 221 222 u64 __read_mostly host_xss; 223 EXPORT_SYMBOL_GPL(host_xss); 224 u64 __read_mostly supported_xss; 225 EXPORT_SYMBOL_GPL(supported_xss); 226 227 const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 228 KVM_GENERIC_VM_STATS(), 229 STATS_DESC_COUNTER(VM, mmu_shadow_zapped), 230 STATS_DESC_COUNTER(VM, mmu_pte_write), 231 STATS_DESC_COUNTER(VM, mmu_pde_zapped), 232 STATS_DESC_COUNTER(VM, mmu_flooded), 233 STATS_DESC_COUNTER(VM, mmu_recycled), 234 STATS_DESC_COUNTER(VM, mmu_cache_miss), 235 STATS_DESC_ICOUNTER(VM, mmu_unsync), 236 STATS_DESC_ICOUNTER(VM, lpages), 237 STATS_DESC_ICOUNTER(VM, nx_lpage_splits), 238 STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions) 239 }; 240 static_assert(ARRAY_SIZE(kvm_vm_stats_desc) == 241 sizeof(struct kvm_vm_stat) / sizeof(u64)); 242 243 const struct kvm_stats_header kvm_vm_stats_header = { 244 .name_size = KVM_STATS_NAME_SIZE, 245 .num_desc = ARRAY_SIZE(kvm_vm_stats_desc), 246 .id_offset = sizeof(struct kvm_stats_header), 247 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, 248 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + 249 sizeof(kvm_vm_stats_desc), 250 }; 251 252 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 253 KVM_GENERIC_VCPU_STATS(), 254 STATS_DESC_COUNTER(VCPU, pf_fixed), 255 STATS_DESC_COUNTER(VCPU, pf_guest), 256 STATS_DESC_COUNTER(VCPU, tlb_flush), 257 STATS_DESC_COUNTER(VCPU, 
invlpg), 258 STATS_DESC_COUNTER(VCPU, exits), 259 STATS_DESC_COUNTER(VCPU, io_exits), 260 STATS_DESC_COUNTER(VCPU, mmio_exits), 261 STATS_DESC_COUNTER(VCPU, signal_exits), 262 STATS_DESC_COUNTER(VCPU, irq_window_exits), 263 STATS_DESC_COUNTER(VCPU, nmi_window_exits), 264 STATS_DESC_COUNTER(VCPU, l1d_flush), 265 STATS_DESC_COUNTER(VCPU, halt_exits), 266 STATS_DESC_COUNTER(VCPU, request_irq_exits), 267 STATS_DESC_COUNTER(VCPU, irq_exits), 268 STATS_DESC_COUNTER(VCPU, host_state_reload), 269 STATS_DESC_COUNTER(VCPU, fpu_reload), 270 STATS_DESC_COUNTER(VCPU, insn_emulation), 271 STATS_DESC_COUNTER(VCPU, insn_emulation_fail), 272 STATS_DESC_COUNTER(VCPU, hypercalls), 273 STATS_DESC_COUNTER(VCPU, irq_injections), 274 STATS_DESC_COUNTER(VCPU, nmi_injections), 275 STATS_DESC_COUNTER(VCPU, req_event), 276 STATS_DESC_COUNTER(VCPU, nested_run), 277 STATS_DESC_COUNTER(VCPU, directed_yield_attempted), 278 STATS_DESC_COUNTER(VCPU, directed_yield_successful), 279 STATS_DESC_ICOUNTER(VCPU, guest_mode) 280 }; 281 static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) == 282 sizeof(struct kvm_vcpu_stat) / sizeof(u64)); 283 284 const struct kvm_stats_header kvm_vcpu_stats_header = { 285 .name_size = KVM_STATS_NAME_SIZE, 286 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), 287 .id_offset = sizeof(struct kvm_stats_header), 288 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, 289 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + 290 sizeof(kvm_vcpu_stats_desc), 291 }; 292 293 u64 __read_mostly host_xcr0; 294 u64 __read_mostly supported_xcr0; 295 EXPORT_SYMBOL_GPL(supported_xcr0); 296 297 static struct kmem_cache *x86_fpu_cache; 298 299 static struct kmem_cache *x86_emulator_cache; 300 301 /* 302 * When called, it means the previous get/set msr reached an invalid msr. 303 * Return true if we want to ignore/silent this failed msr access. 304 */ 305 static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write) 306 { 307 const char *op = write ? 
"wrmsr" : "rdmsr"; 308 309 if (ignore_msrs) { 310 if (report_ignored_msrs) 311 kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", 312 op, msr, data); 313 /* Mask the error */ 314 return true; 315 } else { 316 kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n", 317 op, msr, data); 318 return false; 319 } 320 } 321 322 static struct kmem_cache *kvm_alloc_emulator_cache(void) 323 { 324 unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src); 325 unsigned int size = sizeof(struct x86_emulate_ctxt); 326 327 return kmem_cache_create_usercopy("x86_emulator", size, 328 __alignof__(struct x86_emulate_ctxt), 329 SLAB_ACCOUNT, useroffset, 330 size - useroffset, NULL); 331 } 332 333 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); 334 335 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) 336 { 337 int i; 338 for (i = 0; i < ASYNC_PF_PER_VCPU; i++) 339 vcpu->arch.apf.gfns[i] = ~0; 340 } 341 342 static void kvm_on_user_return(struct user_return_notifier *urn) 343 { 344 unsigned slot; 345 struct kvm_user_return_msrs *msrs 346 = container_of(urn, struct kvm_user_return_msrs, urn); 347 struct kvm_user_return_msr_values *values; 348 unsigned long flags; 349 350 /* 351 * Disabling irqs at this point since the following code could be 352 * interrupted and executed through kvm_arch_hardware_disable() 353 */ 354 local_irq_save(flags); 355 if (msrs->registered) { 356 msrs->registered = false; 357 user_return_notifier_unregister(urn); 358 } 359 local_irq_restore(flags); 360 for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) { 361 values = &msrs->values[slot]; 362 if (values->host != values->curr) { 363 wrmsrl(kvm_uret_msrs_list[slot], values->host); 364 values->curr = values->host; 365 } 366 } 367 } 368 369 static int kvm_probe_user_return_msr(u32 msr) 370 { 371 u64 val; 372 int ret; 373 374 preempt_disable(); 375 ret = rdmsrl_safe(msr, &val); 376 if (ret) 377 goto out; 378 ret = wrmsrl_safe(msr, val); 379 out: 380 preempt_enable(); 381 return ret; 382 } 383 384 int kvm_add_user_return_msr(u32 msr) 385 { 386 BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS); 387 388 if (kvm_probe_user_return_msr(msr)) 389 return -1; 390 391 kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr; 392 return kvm_nr_uret_msrs++; 393 } 394 EXPORT_SYMBOL_GPL(kvm_add_user_return_msr); 395 396 int kvm_find_user_return_msr(u32 msr) 397 { 398 int i; 399 400 for (i = 0; i < kvm_nr_uret_msrs; ++i) { 401 if (kvm_uret_msrs_list[i] == msr) 402 return i; 403 } 404 return -1; 405 } 406 EXPORT_SYMBOL_GPL(kvm_find_user_return_msr); 407 408 static void kvm_user_return_msr_cpu_online(void) 409 { 410 unsigned int cpu = smp_processor_id(); 411 struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu); 412 u64 value; 413 int i; 414 415 for (i = 0; i < kvm_nr_uret_msrs; ++i) { 416 rdmsrl_safe(kvm_uret_msrs_list[i], &value); 417 msrs->values[i].host = value; 418 msrs->values[i].curr = value; 419 } 420 } 421 422 int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask) 423 { 424 unsigned int cpu = smp_processor_id(); 425 struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu); 426 int err; 427 428 value = (value & mask) | (msrs->values[slot].host & ~mask); 429 if (value == msrs->values[slot].curr) 430 return 0; 431 err = wrmsrl_safe(kvm_uret_msrs_list[slot], value); 432 if (err) 433 return 1; 434 435 msrs->values[slot].curr = value; 436 if (!msrs->registered) { 437 msrs->urn.on_user_return = kvm_on_user_return; 438 user_return_notifier_register(&msrs->urn); 439 msrs->registered = true; 
440 } 441 return 0; 442 } 443 EXPORT_SYMBOL_GPL(kvm_set_user_return_msr); 444 445 static void drop_user_return_notifiers(void) 446 { 447 unsigned int cpu = smp_processor_id(); 448 struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu); 449 450 if (msrs->registered) 451 kvm_on_user_return(&msrs->urn); 452 } 453 454 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) 455 { 456 return vcpu->arch.apic_base; 457 } 458 EXPORT_SYMBOL_GPL(kvm_get_apic_base); 459 460 enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu) 461 { 462 return kvm_apic_mode(kvm_get_apic_base(vcpu)); 463 } 464 EXPORT_SYMBOL_GPL(kvm_get_apic_mode); 465 466 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 467 { 468 enum lapic_mode old_mode = kvm_get_apic_mode(vcpu); 469 enum lapic_mode new_mode = kvm_apic_mode(msr_info->data); 470 u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff | 471 (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE); 472 473 if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID) 474 return 1; 475 if (!msr_info->host_initiated) { 476 if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC) 477 return 1; 478 if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC) 479 return 1; 480 } 481 482 kvm_lapic_set_base(vcpu, msr_info->data); 483 kvm_recalculate_apic_map(vcpu->kvm); 484 return 0; 485 } 486 EXPORT_SYMBOL_GPL(kvm_set_apic_base); 487 488 asmlinkage __visible noinstr void kvm_spurious_fault(void) 489 { 490 /* Fault while not rebooting. We want the trace. */ 491 BUG_ON(!kvm_rebooting); 492 } 493 EXPORT_SYMBOL_GPL(kvm_spurious_fault); 494 495 #define EXCPT_BENIGN 0 496 #define EXCPT_CONTRIBUTORY 1 497 #define EXCPT_PF 2 498 499 static int exception_class(int vector) 500 { 501 switch (vector) { 502 case PF_VECTOR: 503 return EXCPT_PF; 504 case DE_VECTOR: 505 case TS_VECTOR: 506 case NP_VECTOR: 507 case SS_VECTOR: 508 case GP_VECTOR: 509 return EXCPT_CONTRIBUTORY; 510 default: 511 break; 512 } 513 return EXCPT_BENIGN; 514 } 515 516 #define EXCPT_FAULT 0 517 #define EXCPT_TRAP 1 518 #define EXCPT_ABORT 2 519 #define EXCPT_INTERRUPT 3 520 521 static int exception_type(int vector) 522 { 523 unsigned int mask; 524 525 if (WARN_ON(vector > 31 || vector == NMI_VECTOR)) 526 return EXCPT_INTERRUPT; 527 528 mask = 1 << vector; 529 530 /* #DB is trap, as instruction watchpoints are handled elsewhere */ 531 if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR))) 532 return EXCPT_TRAP; 533 534 if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR))) 535 return EXCPT_ABORT; 536 537 /* Reserved exceptions will result in fault */ 538 return EXCPT_FAULT; 539 } 540 541 void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) 542 { 543 unsigned nr = vcpu->arch.exception.nr; 544 bool has_payload = vcpu->arch.exception.has_payload; 545 unsigned long payload = vcpu->arch.exception.payload; 546 547 if (!has_payload) 548 return; 549 550 switch (nr) { 551 case DB_VECTOR: 552 /* 553 * "Certain debug exceptions may clear bit 0-3. The 554 * remaining contents of the DR6 register are never 555 * cleared by the processor". 556 */ 557 vcpu->arch.dr6 &= ~DR_TRAP_BITS; 558 /* 559 * In order to reflect the #DB exception payload in guest 560 * dr6, three components need to be considered: active low 561 * bit, FIXED_1 bits and active high bits (e.g. DR6_BD, 562 * DR6_BS and DR6_BT) 563 * DR6_ACTIVE_LOW contains the FIXED_1 and active low bits. 564 * In the target guest dr6: 565 * FIXED_1 bits should always be set. 
566 * Active low bits should be cleared if 1-setting in payload. 567 * Active high bits should be set if 1-setting in payload. 568 * 569 * Note, the payload is compatible with the pending debug 570 * exceptions/exit qualification under VMX, that active_low bits 571 * are active high in payload. 572 * So they need to be flipped for DR6. 573 */ 574 vcpu->arch.dr6 |= DR6_ACTIVE_LOW; 575 vcpu->arch.dr6 |= payload; 576 vcpu->arch.dr6 ^= payload & DR6_ACTIVE_LOW; 577 578 /* 579 * The #DB payload is defined as compatible with the 'pending 580 * debug exceptions' field under VMX, not DR6. While bit 12 is 581 * defined in the 'pending debug exceptions' field (enabled 582 * breakpoint), it is reserved and must be zero in DR6. 583 */ 584 vcpu->arch.dr6 &= ~BIT(12); 585 break; 586 case PF_VECTOR: 587 vcpu->arch.cr2 = payload; 588 break; 589 } 590 591 vcpu->arch.exception.has_payload = false; 592 vcpu->arch.exception.payload = 0; 593 } 594 EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload); 595 596 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, 597 unsigned nr, bool has_error, u32 error_code, 598 bool has_payload, unsigned long payload, bool reinject) 599 { 600 u32 prev_nr; 601 int class1, class2; 602 603 kvm_make_request(KVM_REQ_EVENT, vcpu); 604 605 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { 606 queue: 607 if (reinject) { 608 /* 609 * On vmentry, vcpu->arch.exception.pending is only 610 * true if an event injection was blocked by 611 * nested_run_pending. In that case, however, 612 * vcpu_enter_guest requests an immediate exit, 613 * and the guest shouldn't proceed far enough to 614 * need reinjection. 615 */ 616 WARN_ON_ONCE(vcpu->arch.exception.pending); 617 vcpu->arch.exception.injected = true; 618 if (WARN_ON_ONCE(has_payload)) { 619 /* 620 * A reinjected event has already 621 * delivered its payload. 622 */ 623 has_payload = false; 624 payload = 0; 625 } 626 } else { 627 vcpu->arch.exception.pending = true; 628 vcpu->arch.exception.injected = false; 629 } 630 vcpu->arch.exception.has_error_code = has_error; 631 vcpu->arch.exception.nr = nr; 632 vcpu->arch.exception.error_code = error_code; 633 vcpu->arch.exception.has_payload = has_payload; 634 vcpu->arch.exception.payload = payload; 635 if (!is_guest_mode(vcpu)) 636 kvm_deliver_exception_payload(vcpu); 637 return; 638 } 639 640 /* to check exception */ 641 prev_nr = vcpu->arch.exception.nr; 642 if (prev_nr == DF_VECTOR) { 643 /* triple fault -> shutdown */ 644 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 645 return; 646 } 647 class1 = exception_class(prev_nr); 648 class2 = exception_class(nr); 649 if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) 650 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { 651 /* 652 * Generate double fault per SDM Table 5-5. Set 653 * exception.pending = true so that the double fault 654 * can trigger a nested vmexit. 
655 */ 656 vcpu->arch.exception.pending = true; 657 vcpu->arch.exception.injected = false; 658 vcpu->arch.exception.has_error_code = true; 659 vcpu->arch.exception.nr = DF_VECTOR; 660 vcpu->arch.exception.error_code = 0; 661 vcpu->arch.exception.has_payload = false; 662 vcpu->arch.exception.payload = 0; 663 } else 664 /* replace previous exception with a new one in a hope 665 that instruction re-execution will regenerate lost 666 exception */ 667 goto queue; 668 } 669 670 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) 671 { 672 kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false); 673 } 674 EXPORT_SYMBOL_GPL(kvm_queue_exception); 675 676 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) 677 { 678 kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true); 679 } 680 EXPORT_SYMBOL_GPL(kvm_requeue_exception); 681 682 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, 683 unsigned long payload) 684 { 685 kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false); 686 } 687 EXPORT_SYMBOL_GPL(kvm_queue_exception_p); 688 689 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr, 690 u32 error_code, unsigned long payload) 691 { 692 kvm_multiple_exception(vcpu, nr, true, error_code, 693 true, payload, false); 694 } 695 696 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) 697 { 698 if (err) 699 kvm_inject_gp(vcpu, 0); 700 else 701 return kvm_skip_emulated_instruction(vcpu); 702 703 return 1; 704 } 705 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); 706 707 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) 708 { 709 ++vcpu->stat.pf_guest; 710 vcpu->arch.exception.nested_apf = 711 is_guest_mode(vcpu) && fault->async_page_fault; 712 if (vcpu->arch.exception.nested_apf) { 713 vcpu->arch.apf.nested_apf_token = fault->address; 714 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); 715 } else { 716 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, 717 fault->address); 718 } 719 } 720 EXPORT_SYMBOL_GPL(kvm_inject_page_fault); 721 722 bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, 723 struct x86_exception *fault) 724 { 725 struct kvm_mmu *fault_mmu; 726 WARN_ON_ONCE(fault->vector != PF_VECTOR); 727 728 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : 729 vcpu->arch.walk_mmu; 730 731 /* 732 * Invalidate the TLB entry for the faulting address, if it exists, 733 * else the access will fault indefinitely (and to emulate hardware). 734 */ 735 if ((fault->error_code & PFERR_PRESENT_MASK) && 736 !(fault->error_code & PFERR_RSVD_MASK)) 737 kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address, 738 fault_mmu->root_hpa); 739 740 fault_mmu->inject_page_fault(vcpu, fault); 741 return fault->nested_page_fault; 742 } 743 EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault); 744 745 void kvm_inject_nmi(struct kvm_vcpu *vcpu) 746 { 747 atomic_inc(&vcpu->arch.nmi_queued); 748 kvm_make_request(KVM_REQ_NMI, vcpu); 749 } 750 EXPORT_SYMBOL_GPL(kvm_inject_nmi); 751 752 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) 753 { 754 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false); 755 } 756 EXPORT_SYMBOL_GPL(kvm_queue_exception_e); 757 758 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) 759 { 760 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true); 761 } 762 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); 763 764 /* 765 * Checks if cpl <= required_cpl; if true, return true. 
Otherwise queue 766 * a #GP and return false. 767 */ 768 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) 769 { 770 if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl) 771 return true; 772 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 773 return false; 774 } 775 EXPORT_SYMBOL_GPL(kvm_require_cpl); 776 777 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) 778 { 779 if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE)) 780 return true; 781 782 kvm_queue_exception(vcpu, UD_VECTOR); 783 return false; 784 } 785 EXPORT_SYMBOL_GPL(kvm_require_dr); 786 787 /* 788 * This function will be used to read from the physical memory of the currently 789 * running guest. The difference to kvm_vcpu_read_guest_page is that this function 790 * can read from guest physical or from the guest's guest physical memory. 791 */ 792 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, 793 gfn_t ngfn, void *data, int offset, int len, 794 u32 access) 795 { 796 struct x86_exception exception; 797 gfn_t real_gfn; 798 gpa_t ngpa; 799 800 ngpa = gfn_to_gpa(ngfn); 801 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); 802 if (real_gfn == UNMAPPED_GVA) 803 return -EFAULT; 804 805 real_gfn = gpa_to_gfn(real_gfn); 806 807 return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len); 808 } 809 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); 810 811 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) 812 { 813 return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); 814 } 815 816 /* 817 * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise. 818 */ 819 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) 820 { 821 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; 822 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; 823 int i; 824 int ret; 825 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; 826 827 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, 828 offset * sizeof(u64), sizeof(pdpte), 829 PFERR_USER_MASK|PFERR_WRITE_MASK); 830 if (ret < 0) { 831 ret = 0; 832 goto out; 833 } 834 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { 835 if ((pdpte[i] & PT_PRESENT_MASK) && 836 (pdpte[i] & pdptr_rsvd_bits(vcpu))) { 837 ret = 0; 838 goto out; 839 } 840 } 841 ret = 1; 842 843 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); 844 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); 845 vcpu->arch.pdptrs_from_userspace = false; 846 847 out: 848 849 return ret; 850 } 851 EXPORT_SYMBOL_GPL(load_pdptrs); 852 853 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0) 854 { 855 if ((cr0 ^ old_cr0) & X86_CR0_PG) { 856 kvm_clear_async_pf_completion_queue(vcpu); 857 kvm_async_pf_hash_reset(vcpu); 858 } 859 860 if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS) 861 kvm_mmu_reset_context(vcpu); 862 863 if (((cr0 ^ old_cr0) & X86_CR0_CD) && 864 kvm_arch_has_noncoherent_dma(vcpu->kvm) && 865 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 866 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); 867 } 868 EXPORT_SYMBOL_GPL(kvm_post_set_cr0); 869 870 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 871 { 872 unsigned long old_cr0 = kvm_read_cr0(vcpu); 873 unsigned long pdptr_bits = X86_CR0_CD | X86_CR0_NW | X86_CR0_PG; 874 875 cr0 |= X86_CR0_ET; 876 877 #ifdef CONFIG_X86_64 878 if (cr0 & 0xffffffff00000000UL) 879 return 1; 880 #endif 881 882 cr0 &= ~CR0_RESERVED_BITS; 883 884 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) 885 return 1; 886 887 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) 888 return 1; 889 890 #ifdef 
CONFIG_X86_64 891 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && 892 (cr0 & X86_CR0_PG)) { 893 int cs_db, cs_l; 894 895 if (!is_pae(vcpu)) 896 return 1; 897 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); 898 if (cs_l) 899 return 1; 900 } 901 #endif 902 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && 903 is_pae(vcpu) && ((cr0 ^ old_cr0) & pdptr_bits) && 904 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) 905 return 1; 906 907 if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) 908 return 1; 909 910 static_call(kvm_x86_set_cr0)(vcpu, cr0); 911 912 kvm_post_set_cr0(vcpu, old_cr0, cr0); 913 914 return 0; 915 } 916 EXPORT_SYMBOL_GPL(kvm_set_cr0); 917 918 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) 919 { 920 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); 921 } 922 EXPORT_SYMBOL_GPL(kvm_lmsw); 923 924 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) 925 { 926 if (vcpu->arch.guest_state_protected) 927 return; 928 929 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { 930 931 if (vcpu->arch.xcr0 != host_xcr0) 932 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); 933 934 if (vcpu->arch.xsaves_enabled && 935 vcpu->arch.ia32_xss != host_xss) 936 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); 937 } 938 939 if (static_cpu_has(X86_FEATURE_PKU) && 940 (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || 941 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) && 942 vcpu->arch.pkru != vcpu->arch.host_pkru) 943 write_pkru(vcpu->arch.pkru); 944 } 945 EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state); 946 947 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) 948 { 949 if (vcpu->arch.guest_state_protected) 950 return; 951 952 if (static_cpu_has(X86_FEATURE_PKU) && 953 (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || 954 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) { 955 vcpu->arch.pkru = rdpkru(); 956 if (vcpu->arch.pkru != vcpu->arch.host_pkru) 957 write_pkru(vcpu->arch.host_pkru); 958 } 959 960 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { 961 962 if (vcpu->arch.xcr0 != host_xcr0) 963 xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); 964 965 if (vcpu->arch.xsaves_enabled && 966 vcpu->arch.ia32_xss != host_xss) 967 wrmsrl(MSR_IA32_XSS, host_xss); 968 } 969 970 } 971 EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state); 972 973 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) 974 { 975 u64 xcr0 = xcr; 976 u64 old_xcr0 = vcpu->arch.xcr0; 977 u64 valid_bits; 978 979 /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ 980 if (index != XCR_XFEATURE_ENABLED_MASK) 981 return 1; 982 if (!(xcr0 & XFEATURE_MASK_FP)) 983 return 1; 984 if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE)) 985 return 1; 986 987 /* 988 * Do not allow the guest to set bits that we do not support 989 * saving. However, xcr0 bit 0 is always set, even if the 990 * emulated CPU does not support XSAVE (see fx_init). 
991 */ 992 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; 993 if (xcr0 & ~valid_bits) 994 return 1; 995 996 if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) != 997 (!(xcr0 & XFEATURE_MASK_BNDCSR))) 998 return 1; 999 1000 if (xcr0 & XFEATURE_MASK_AVX512) { 1001 if (!(xcr0 & XFEATURE_MASK_YMM)) 1002 return 1; 1003 if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512) 1004 return 1; 1005 } 1006 vcpu->arch.xcr0 = xcr0; 1007 1008 if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND) 1009 kvm_update_cpuid_runtime(vcpu); 1010 return 0; 1011 } 1012 1013 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) 1014 { 1015 if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || 1016 __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { 1017 kvm_inject_gp(vcpu, 0); 1018 return 1; 1019 } 1020 1021 return kvm_skip_emulated_instruction(vcpu); 1022 } 1023 EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv); 1024 1025 bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1026 { 1027 if (cr4 & cr4_reserved_bits) 1028 return false; 1029 1030 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) 1031 return false; 1032 1033 return static_call(kvm_x86_is_valid_cr4)(vcpu, cr4); 1034 } 1035 EXPORT_SYMBOL_GPL(kvm_is_valid_cr4); 1036 1037 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4) 1038 { 1039 if (((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) || 1040 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) 1041 kvm_mmu_reset_context(vcpu); 1042 } 1043 EXPORT_SYMBOL_GPL(kvm_post_set_cr4); 1044 1045 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1046 { 1047 unsigned long old_cr4 = kvm_read_cr4(vcpu); 1048 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | 1049 X86_CR4_SMEP; 1050 1051 if (!kvm_is_valid_cr4(vcpu, cr4)) 1052 return 1; 1053 1054 if (is_long_mode(vcpu)) { 1055 if (!(cr4 & X86_CR4_PAE)) 1056 return 1; 1057 if ((cr4 ^ old_cr4) & X86_CR4_LA57) 1058 return 1; 1059 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) 1060 && ((cr4 ^ old_cr4) & pdptr_bits) 1061 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, 1062 kvm_read_cr3(vcpu))) 1063 return 1; 1064 1065 if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { 1066 if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID)) 1067 return 1; 1068 1069 /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ 1070 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) 1071 return 1; 1072 } 1073 1074 static_call(kvm_x86_set_cr4)(vcpu, cr4); 1075 1076 kvm_post_set_cr4(vcpu, old_cr4, cr4); 1077 1078 return 0; 1079 } 1080 EXPORT_SYMBOL_GPL(kvm_set_cr4); 1081 1082 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) 1083 { 1084 struct kvm_mmu *mmu = vcpu->arch.mmu; 1085 unsigned long roots_to_free = 0; 1086 int i; 1087 1088 /* 1089 * If neither the current CR3 nor any of the prev_roots use the given 1090 * PCID, then nothing needs to be done here because a resync will 1091 * happen anyway before switching to any other CR3. 
1092 */ 1093 if (kvm_get_active_pcid(vcpu) == pcid) { 1094 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); 1095 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 1096 } 1097 1098 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) 1099 if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid) 1100 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 1101 1102 kvm_mmu_free_roots(vcpu, mmu, roots_to_free); 1103 } 1104 1105 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 1106 { 1107 bool skip_tlb_flush = false; 1108 unsigned long pcid = 0; 1109 #ifdef CONFIG_X86_64 1110 bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); 1111 1112 if (pcid_enabled) { 1113 skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH; 1114 cr3 &= ~X86_CR3_PCID_NOFLUSH; 1115 pcid = cr3 & X86_CR3_PCID_MASK; 1116 } 1117 #endif 1118 1119 /* PDPTRs are always reloaded for PAE paging. */ 1120 if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu)) 1121 goto handle_tlb_flush; 1122 1123 /* 1124 * Do not condition the GPA check on long mode, this helper is used to 1125 * stuff CR3, e.g. for RSM emulation, and there is no guarantee that 1126 * the current vCPU mode is accurate. 1127 */ 1128 if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) 1129 return 1; 1130 1131 if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) 1132 return 1; 1133 1134 if (cr3 != kvm_read_cr3(vcpu)) 1135 kvm_mmu_new_pgd(vcpu, cr3); 1136 1137 vcpu->arch.cr3 = cr3; 1138 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 1139 1140 handle_tlb_flush: 1141 /* 1142 * A load of CR3 that flushes the TLB flushes only the current PCID, 1143 * even if PCID is disabled, in which case PCID=0 is flushed. It's a 1144 * moot point in the end because _disabling_ PCID will flush all PCIDs, 1145 * and it's impossible to use a non-zero PCID when PCID is disabled, 1146 * i.e. only PCID=0 can be relevant. 
1147 */ 1148 if (!skip_tlb_flush) 1149 kvm_invalidate_pcid(vcpu, pcid); 1150 1151 return 0; 1152 } 1153 EXPORT_SYMBOL_GPL(kvm_set_cr3); 1154 1155 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) 1156 { 1157 if (cr8 & CR8_RESERVED_BITS) 1158 return 1; 1159 if (lapic_in_kernel(vcpu)) 1160 kvm_lapic_set_tpr(vcpu, cr8); 1161 else 1162 vcpu->arch.cr8 = cr8; 1163 return 0; 1164 } 1165 EXPORT_SYMBOL_GPL(kvm_set_cr8); 1166 1167 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) 1168 { 1169 if (lapic_in_kernel(vcpu)) 1170 return kvm_lapic_get_cr8(vcpu); 1171 else 1172 return vcpu->arch.cr8; 1173 } 1174 EXPORT_SYMBOL_GPL(kvm_get_cr8); 1175 1176 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) 1177 { 1178 int i; 1179 1180 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { 1181 for (i = 0; i < KVM_NR_DB_REGS; i++) 1182 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 1183 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; 1184 } 1185 } 1186 1187 void kvm_update_dr7(struct kvm_vcpu *vcpu) 1188 { 1189 unsigned long dr7; 1190 1191 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 1192 dr7 = vcpu->arch.guest_debug_dr7; 1193 else 1194 dr7 = vcpu->arch.dr7; 1195 static_call(kvm_x86_set_dr7)(vcpu, dr7); 1196 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; 1197 if (dr7 & DR7_BP_EN_MASK) 1198 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; 1199 } 1200 EXPORT_SYMBOL_GPL(kvm_update_dr7); 1201 1202 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) 1203 { 1204 u64 fixed = DR6_FIXED_1; 1205 1206 if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM)) 1207 fixed |= DR6_RTM; 1208 1209 if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)) 1210 fixed |= DR6_BUS_LOCK; 1211 return fixed; 1212 } 1213 1214 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) 1215 { 1216 size_t size = ARRAY_SIZE(vcpu->arch.db); 1217 1218 switch (dr) { 1219 case 0 ... 3: 1220 vcpu->arch.db[array_index_nospec(dr, size)] = val; 1221 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) 1222 vcpu->arch.eff_db[dr] = val; 1223 break; 1224 case 4: 1225 case 6: 1226 if (!kvm_dr6_valid(val)) 1227 return 1; /* #GP */ 1228 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); 1229 break; 1230 case 5: 1231 default: /* 7 */ 1232 if (!kvm_dr7_valid(val)) 1233 return 1; /* #GP */ 1234 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; 1235 kvm_update_dr7(vcpu); 1236 break; 1237 } 1238 1239 return 0; 1240 } 1241 EXPORT_SYMBOL_GPL(kvm_set_dr); 1242 1243 void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) 1244 { 1245 size_t size = ARRAY_SIZE(vcpu->arch.db); 1246 1247 switch (dr) { 1248 case 0 ... 3: 1249 *val = vcpu->arch.db[array_index_nospec(dr, size)]; 1250 break; 1251 case 4: 1252 case 6: 1253 *val = vcpu->arch.dr6; 1254 break; 1255 case 5: 1256 default: /* 7 */ 1257 *val = vcpu->arch.dr7; 1258 break; 1259 } 1260 } 1261 EXPORT_SYMBOL_GPL(kvm_get_dr); 1262 1263 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu) 1264 { 1265 u32 ecx = kvm_rcx_read(vcpu); 1266 u64 data; 1267 1268 if (kvm_pmu_rdpmc(vcpu, ecx, &data)) { 1269 kvm_inject_gp(vcpu, 0); 1270 return 1; 1271 } 1272 1273 kvm_rax_write(vcpu, (u32)data); 1274 kvm_rdx_write(vcpu, data >> 32); 1275 return kvm_skip_emulated_instruction(vcpu); 1276 } 1277 EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc); 1278 1279 /* 1280 * List of msr numbers which we expose to userspace through KVM_GET_MSRS 1281 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. 
1282 * 1283 * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) 1284 * extract the supported MSRs from the related const lists. 1285 * msrs_to_save is selected from the msrs_to_save_all to reflect the 1286 * capabilities of the host cpu. This capabilities test skips MSRs that are 1287 * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs 1288 * may depend on host virtualization features rather than host cpu features. 1289 */ 1290 1291 static const u32 msrs_to_save_all[] = { 1292 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, 1293 MSR_STAR, 1294 #ifdef CONFIG_X86_64 1295 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, 1296 #endif 1297 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, 1298 MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, 1299 MSR_IA32_SPEC_CTRL, 1300 MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, 1301 MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, 1302 MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, 1303 MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B, 1304 MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, 1305 MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, 1306 MSR_IA32_UMWAIT_CONTROL, 1307 1308 MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, 1309 MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3, 1310 MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, 1311 MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 1312 MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, 1313 MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3, 1314 MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5, 1315 MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7, 1316 MSR_ARCH_PERFMON_PERFCTR0 + 8, MSR_ARCH_PERFMON_PERFCTR0 + 9, 1317 MSR_ARCH_PERFMON_PERFCTR0 + 10, MSR_ARCH_PERFMON_PERFCTR0 + 11, 1318 MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13, 1319 MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15, 1320 MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17, 1321 MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1, 1322 MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3, 1323 MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5, 1324 MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7, 1325 MSR_ARCH_PERFMON_EVENTSEL0 + 8, MSR_ARCH_PERFMON_EVENTSEL0 + 9, 1326 MSR_ARCH_PERFMON_EVENTSEL0 + 10, MSR_ARCH_PERFMON_EVENTSEL0 + 11, 1327 MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13, 1328 MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15, 1329 MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17, 1330 }; 1331 1332 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)]; 1333 static unsigned num_msrs_to_save; 1334 1335 static const u32 emulated_msrs_all[] = { 1336 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, 1337 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, 1338 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, 1339 HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC, 1340 HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY, 1341 HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2, 1342 HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL, 1343 HV_X64_MSR_RESET, 1344 HV_X64_MSR_VP_INDEX, 1345 HV_X64_MSR_VP_RUNTIME, 1346 HV_X64_MSR_SCONTROL, 1347 HV_X64_MSR_STIMER0_CONFIG, 1348 HV_X64_MSR_VP_ASSIST_PAGE, 1349 HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL, 1350 HV_X64_MSR_TSC_EMULATION_STATUS, 1351 
HV_X64_MSR_SYNDBG_OPTIONS, 1352 HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS, 1353 HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER, 1354 HV_X64_MSR_SYNDBG_PENDING_BUFFER, 1355 1356 MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, 1357 MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK, 1358 1359 MSR_IA32_TSC_ADJUST, 1360 MSR_IA32_TSC_DEADLINE, 1361 MSR_IA32_ARCH_CAPABILITIES, 1362 MSR_IA32_PERF_CAPABILITIES, 1363 MSR_IA32_MISC_ENABLE, 1364 MSR_IA32_MCG_STATUS, 1365 MSR_IA32_MCG_CTL, 1366 MSR_IA32_MCG_EXT_CTL, 1367 MSR_IA32_SMBASE, 1368 MSR_SMI_COUNT, 1369 MSR_PLATFORM_INFO, 1370 MSR_MISC_FEATURES_ENABLES, 1371 MSR_AMD64_VIRT_SPEC_CTRL, 1372 MSR_IA32_POWER_CTL, 1373 MSR_IA32_UCODE_REV, 1374 1375 /* 1376 * The following list leaves out MSRs whose values are determined 1377 * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs. 1378 * We always support the "true" VMX control MSRs, even if the host 1379 * processor does not, so I am putting these registers here rather 1380 * than in msrs_to_save_all. 1381 */ 1382 MSR_IA32_VMX_BASIC, 1383 MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1384 MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1385 MSR_IA32_VMX_TRUE_EXIT_CTLS, 1386 MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1387 MSR_IA32_VMX_MISC, 1388 MSR_IA32_VMX_CR0_FIXED0, 1389 MSR_IA32_VMX_CR4_FIXED0, 1390 MSR_IA32_VMX_VMCS_ENUM, 1391 MSR_IA32_VMX_PROCBASED_CTLS2, 1392 MSR_IA32_VMX_EPT_VPID_CAP, 1393 MSR_IA32_VMX_VMFUNC, 1394 1395 MSR_K7_HWCR, 1396 MSR_KVM_POLL_CONTROL, 1397 }; 1398 1399 static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; 1400 static unsigned num_emulated_msrs; 1401 1402 /* 1403 * List of msr numbers which are used to expose MSR-based features that 1404 * can be used by a hypervisor to validate requested CPU features. 1405 */ 1406 static const u32 msr_based_features_all[] = { 1407 MSR_IA32_VMX_BASIC, 1408 MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1409 MSR_IA32_VMX_PINBASED_CTLS, 1410 MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1411 MSR_IA32_VMX_PROCBASED_CTLS, 1412 MSR_IA32_VMX_TRUE_EXIT_CTLS, 1413 MSR_IA32_VMX_EXIT_CTLS, 1414 MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1415 MSR_IA32_VMX_ENTRY_CTLS, 1416 MSR_IA32_VMX_MISC, 1417 MSR_IA32_VMX_CR0_FIXED0, 1418 MSR_IA32_VMX_CR0_FIXED1, 1419 MSR_IA32_VMX_CR4_FIXED0, 1420 MSR_IA32_VMX_CR4_FIXED1, 1421 MSR_IA32_VMX_VMCS_ENUM, 1422 MSR_IA32_VMX_PROCBASED_CTLS2, 1423 MSR_IA32_VMX_EPT_VPID_CAP, 1424 MSR_IA32_VMX_VMFUNC, 1425 1426 MSR_F10H_DECFG, 1427 MSR_IA32_UCODE_REV, 1428 MSR_IA32_ARCH_CAPABILITIES, 1429 MSR_IA32_PERF_CAPABILITIES, 1430 }; 1431 1432 static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)]; 1433 static unsigned int num_msr_based_features; 1434 1435 static u64 kvm_get_arch_capabilities(void) 1436 { 1437 u64 data = 0; 1438 1439 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) 1440 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data); 1441 1442 /* 1443 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that 1444 * the nested hypervisor runs with NX huge pages. If it is not, 1445 * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other 1446 * L1 guests, so it need not worry about its own (L2) guests. 1447 */ 1448 data |= ARCH_CAP_PSCHANGE_MC_NO; 1449 1450 /* 1451 * If we're doing cache flushes (either "always" or "cond") 1452 * we will do one whenever the guest does a vmlaunch/vmresume. 1453 * If an outer hypervisor is doing the cache flush for us 1454 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that 1455 * capability to the guest too, and if EPT is disabled we're not 1456 * vulnerable. 
Overall, only VMENTER_L1D_FLUSH_NEVER will 1457 * require a nested hypervisor to do a flush of its own. 1458 */ 1459 if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER) 1460 data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH; 1461 1462 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) 1463 data |= ARCH_CAP_RDCL_NO; 1464 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 1465 data |= ARCH_CAP_SSB_NO; 1466 if (!boot_cpu_has_bug(X86_BUG_MDS)) 1467 data |= ARCH_CAP_MDS_NO; 1468 1469 if (!boot_cpu_has(X86_FEATURE_RTM)) { 1470 /* 1471 * If RTM=0 because the kernel has disabled TSX, the host might 1472 * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0 1473 * and therefore knows that there cannot be TAA) but keep 1474 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts, 1475 * and we want to allow migrating those guests to tsx=off hosts. 1476 */ 1477 data &= ~ARCH_CAP_TAA_NO; 1478 } else if (!boot_cpu_has_bug(X86_BUG_TAA)) { 1479 data |= ARCH_CAP_TAA_NO; 1480 } else { 1481 /* 1482 * Nothing to do here; we emulate TSX_CTRL if present on the 1483 * host so the guest can choose between disabling TSX or 1484 * using VERW to clear CPU buffers. 1485 */ 1486 } 1487 1488 return data; 1489 } 1490 1491 static int kvm_get_msr_feature(struct kvm_msr_entry *msr) 1492 { 1493 switch (msr->index) { 1494 case MSR_IA32_ARCH_CAPABILITIES: 1495 msr->data = kvm_get_arch_capabilities(); 1496 break; 1497 case MSR_IA32_UCODE_REV: 1498 rdmsrl_safe(msr->index, &msr->data); 1499 break; 1500 default: 1501 return static_call(kvm_x86_get_msr_feature)(msr); 1502 } 1503 return 0; 1504 } 1505 1506 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 1507 { 1508 struct kvm_msr_entry msr; 1509 int r; 1510 1511 msr.index = index; 1512 r = kvm_get_msr_feature(&msr); 1513 1514 if (r == KVM_MSR_RET_INVALID) { 1515 /* Unconditionally clear the output for simplicity */ 1516 *data = 0; 1517 if (kvm_msr_ignored_check(index, 0, false)) 1518 r = 0; 1519 } 1520 1521 if (r) 1522 return r; 1523 1524 *data = msr.data; 1525 1526 return 0; 1527 } 1528 1529 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) 1530 { 1531 if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) 1532 return false; 1533 1534 if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) 1535 return false; 1536 1537 if (efer & (EFER_LME | EFER_LMA) && 1538 !guest_cpuid_has(vcpu, X86_FEATURE_LM)) 1539 return false; 1540 1541 if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX)) 1542 return false; 1543 1544 return true; 1545 1546 } 1547 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) 1548 { 1549 if (efer & efer_reserved_bits) 1550 return false; 1551 1552 return __kvm_valid_efer(vcpu, efer); 1553 } 1554 EXPORT_SYMBOL_GPL(kvm_valid_efer); 1555 1556 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1557 { 1558 u64 old_efer = vcpu->arch.efer; 1559 u64 efer = msr_info->data; 1560 int r; 1561 1562 if (efer & efer_reserved_bits) 1563 return 1; 1564 1565 if (!msr_info->host_initiated) { 1566 if (!__kvm_valid_efer(vcpu, efer)) 1567 return 1; 1568 1569 if (is_paging(vcpu) && 1570 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) 1571 return 1; 1572 } 1573 1574 efer &= ~EFER_LMA; 1575 efer |= vcpu->arch.efer & EFER_LMA; 1576 1577 r = static_call(kvm_x86_set_efer)(vcpu, efer); 1578 if (r) { 1579 WARN_ON(r > 0); 1580 return r; 1581 } 1582 1583 /* Update reserved bits */ 1584 if ((efer ^ old_efer) & EFER_NX) 1585 kvm_mmu_reset_context(vcpu); 1586 1587 return 0; 1588 } 1589 1590 void 
kvm_enable_efer_bits(u64 mask) 1591 { 1592 efer_reserved_bits &= ~mask; 1593 } 1594 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); 1595 1596 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type) 1597 { 1598 struct kvm_x86_msr_filter *msr_filter; 1599 struct msr_bitmap_range *ranges; 1600 struct kvm *kvm = vcpu->kvm; 1601 bool allowed; 1602 int idx; 1603 u32 i; 1604 1605 /* x2APIC MSRs do not support filtering. */ 1606 if (index >= 0x800 && index <= 0x8ff) 1607 return true; 1608 1609 idx = srcu_read_lock(&kvm->srcu); 1610 1611 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); 1612 if (!msr_filter) { 1613 allowed = true; 1614 goto out; 1615 } 1616 1617 allowed = msr_filter->default_allow; 1618 ranges = msr_filter->ranges; 1619 1620 for (i = 0; i < msr_filter->count; i++) { 1621 u32 start = ranges[i].base; 1622 u32 end = start + ranges[i].nmsrs; 1623 u32 flags = ranges[i].flags; 1624 unsigned long *bitmap = ranges[i].bitmap; 1625 1626 if ((index >= start) && (index < end) && (flags & type)) { 1627 allowed = !!test_bit(index - start, bitmap); 1628 break; 1629 } 1630 } 1631 1632 out: 1633 srcu_read_unlock(&kvm->srcu, idx); 1634 1635 return allowed; 1636 } 1637 EXPORT_SYMBOL_GPL(kvm_msr_allowed); 1638 1639 /* 1640 * Write @data into the MSR specified by @index. Select MSR specific fault 1641 * checks are bypassed if @host_initiated is %true. 1642 * Returns 0 on success, non-0 otherwise. 1643 * Assumes vcpu_load() was already called. 1644 */ 1645 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, 1646 bool host_initiated) 1647 { 1648 struct msr_data msr; 1649 1650 if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) 1651 return KVM_MSR_RET_FILTERED; 1652 1653 switch (index) { 1654 case MSR_FS_BASE: 1655 case MSR_GS_BASE: 1656 case MSR_KERNEL_GS_BASE: 1657 case MSR_CSTAR: 1658 case MSR_LSTAR: 1659 if (is_noncanonical_address(data, vcpu)) 1660 return 1; 1661 break; 1662 case MSR_IA32_SYSENTER_EIP: 1663 case MSR_IA32_SYSENTER_ESP: 1664 /* 1665 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if 1666 * non-canonical address is written on Intel but not on 1667 * AMD (which ignores the top 32-bits, because it does 1668 * not implement 64-bit SYSENTER). 1669 * 1670 * 64-bit code should hence be able to write a non-canonical 1671 * value on AMD. Making the address canonical ensures that 1672 * vmentry does not fail on Intel after writing a non-canonical 1673 * value, and that something deterministic happens if the guest 1674 * invokes 64-bit SYSENTER. 1675 */ 1676 data = get_canonical(data, vcpu_virt_addr_bits(vcpu)); 1677 break; 1678 case MSR_TSC_AUX: 1679 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) 1680 return 1; 1681 1682 if (!host_initiated && 1683 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && 1684 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) 1685 return 1; 1686 1687 /* 1688 * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has 1689 * incomplete and conflicting architectural behavior. Current 1690 * AMD CPUs completely ignore bits 63:32, i.e. they aren't 1691 * reserved and always read as zeros. Enforce Intel's reserved 1692 * bits check if and only if the guest CPU is Intel, and clear 1693 * the bits in all other cases. This ensures cross-vendor 1694 * migration will provide consistent behavior for the guest. 
1695 */ 1696 if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0) 1697 return 1; 1698 1699 data = (u32)data; 1700 break; 1701 } 1702 1703 msr.data = data; 1704 msr.index = index; 1705 msr.host_initiated = host_initiated; 1706 1707 return static_call(kvm_x86_set_msr)(vcpu, &msr); 1708 } 1709 1710 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, 1711 u32 index, u64 data, bool host_initiated) 1712 { 1713 int ret = __kvm_set_msr(vcpu, index, data, host_initiated); 1714 1715 if (ret == KVM_MSR_RET_INVALID) 1716 if (kvm_msr_ignored_check(index, data, true)) 1717 ret = 0; 1718 1719 return ret; 1720 } 1721 1722 /* 1723 * Read the MSR specified by @index into @data. Select MSR specific fault 1724 * checks are bypassed if @host_initiated is %true. 1725 * Returns 0 on success, non-0 otherwise. 1726 * Assumes vcpu_load() was already called. 1727 */ 1728 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, 1729 bool host_initiated) 1730 { 1731 struct msr_data msr; 1732 int ret; 1733 1734 if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) 1735 return KVM_MSR_RET_FILTERED; 1736 1737 switch (index) { 1738 case MSR_TSC_AUX: 1739 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) 1740 return 1; 1741 1742 if (!host_initiated && 1743 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && 1744 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) 1745 return 1; 1746 break; 1747 } 1748 1749 msr.index = index; 1750 msr.host_initiated = host_initiated; 1751 1752 ret = static_call(kvm_x86_get_msr)(vcpu, &msr); 1753 if (!ret) 1754 *data = msr.data; 1755 return ret; 1756 } 1757 1758 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, 1759 u32 index, u64 *data, bool host_initiated) 1760 { 1761 int ret = __kvm_get_msr(vcpu, index, data, host_initiated); 1762 1763 if (ret == KVM_MSR_RET_INVALID) { 1764 /* Unconditionally clear *data for simplicity */ 1765 *data = 0; 1766 if (kvm_msr_ignored_check(index, 0, false)) 1767 ret = 0; 1768 } 1769 1770 return ret; 1771 } 1772 1773 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1774 { 1775 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1776 } 1777 EXPORT_SYMBOL_GPL(kvm_get_msr); 1778 1779 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) 1780 { 1781 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1782 } 1783 EXPORT_SYMBOL_GPL(kvm_set_msr); 1784 1785 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) 1786 { 1787 int err = vcpu->run->msr.error; 1788 if (!err) { 1789 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); 1790 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); 1791 } 1792 1793 return static_call(kvm_x86_complete_emulated_msr)(vcpu, err); 1794 } 1795 1796 static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu) 1797 { 1798 return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); 1799 } 1800 1801 static u64 kvm_msr_reason(int r) 1802 { 1803 switch (r) { 1804 case KVM_MSR_RET_INVALID: 1805 return KVM_MSR_EXIT_REASON_UNKNOWN; 1806 case KVM_MSR_RET_FILTERED: 1807 return KVM_MSR_EXIT_REASON_FILTER; 1808 default: 1809 return KVM_MSR_EXIT_REASON_INVAL; 1810 } 1811 } 1812 1813 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, 1814 u32 exit_reason, u64 data, 1815 int (*completion)(struct kvm_vcpu *vcpu), 1816 int r) 1817 { 1818 u64 msr_reason = kvm_msr_reason(r); 1819 1820 /* Check if the user wanted to know about this MSR fault */ 1821 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) 1822 return 0; 1823 1824 vcpu->run->exit_reason = exit_reason; 1825 
vcpu->run->msr.error = 0; 1826 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); 1827 vcpu->run->msr.reason = msr_reason; 1828 vcpu->run->msr.index = index; 1829 vcpu->run->msr.data = data; 1830 vcpu->arch.complete_userspace_io = completion; 1831 1832 return 1; 1833 } 1834 1835 static int kvm_get_msr_user_space(struct kvm_vcpu *vcpu, u32 index, int r) 1836 { 1837 return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_RDMSR, 0, 1838 complete_emulated_rdmsr, r); 1839 } 1840 1841 static int kvm_set_msr_user_space(struct kvm_vcpu *vcpu, u32 index, u64 data, int r) 1842 { 1843 return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_WRMSR, data, 1844 complete_emulated_wrmsr, r); 1845 } 1846 1847 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) 1848 { 1849 u32 ecx = kvm_rcx_read(vcpu); 1850 u64 data; 1851 int r; 1852 1853 r = kvm_get_msr(vcpu, ecx, &data); 1854 1855 /* MSR read failed? See if we should ask user space */ 1856 if (r && kvm_get_msr_user_space(vcpu, ecx, r)) { 1857 /* Bounce to user space */ 1858 return 0; 1859 } 1860 1861 if (!r) { 1862 trace_kvm_msr_read(ecx, data); 1863 1864 kvm_rax_write(vcpu, data & -1u); 1865 kvm_rdx_write(vcpu, (data >> 32) & -1u); 1866 } else { 1867 trace_kvm_msr_read_ex(ecx); 1868 } 1869 1870 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 1871 } 1872 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); 1873 1874 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) 1875 { 1876 u32 ecx = kvm_rcx_read(vcpu); 1877 u64 data = kvm_read_edx_eax(vcpu); 1878 int r; 1879 1880 r = kvm_set_msr(vcpu, ecx, data); 1881 1882 /* MSR write failed? See if we should ask user space */ 1883 if (r && kvm_set_msr_user_space(vcpu, ecx, data, r)) 1884 /* Bounce to user space */ 1885 return 0; 1886 1887 /* Signal all other negative errors to userspace */ 1888 if (r < 0) 1889 return r; 1890 1891 if (!r) 1892 trace_kvm_msr_write(ecx, data); 1893 else 1894 trace_kvm_msr_write_ex(ecx, data); 1895 1896 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 1897 } 1898 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); 1899 1900 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) 1901 { 1902 return kvm_skip_emulated_instruction(vcpu); 1903 } 1904 EXPORT_SYMBOL_GPL(kvm_emulate_as_nop); 1905 1906 int kvm_emulate_invd(struct kvm_vcpu *vcpu) 1907 { 1908 /* Treat an INVD instruction as a NOP and just skip it. */ 1909 return kvm_emulate_as_nop(vcpu); 1910 } 1911 EXPORT_SYMBOL_GPL(kvm_emulate_invd); 1912 1913 int kvm_emulate_mwait(struct kvm_vcpu *vcpu) 1914 { 1915 pr_warn_once("kvm: MWAIT instruction emulated as NOP!\n"); 1916 return kvm_emulate_as_nop(vcpu); 1917 } 1918 EXPORT_SYMBOL_GPL(kvm_emulate_mwait); 1919 1920 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) 1921 { 1922 kvm_queue_exception(vcpu, UD_VECTOR); 1923 return 1; 1924 } 1925 EXPORT_SYMBOL_GPL(kvm_handle_invalid_op); 1926 1927 int kvm_emulate_monitor(struct kvm_vcpu *vcpu) 1928 { 1929 pr_warn_once("kvm: MONITOR instruction emulated as NOP!\n"); 1930 return kvm_emulate_as_nop(vcpu); 1931 } 1932 EXPORT_SYMBOL_GPL(kvm_emulate_monitor); 1933 1934 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) 1935 { 1936 xfer_to_guest_mode_prepare(); 1937 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || 1938 xfer_to_guest_mode_work_pending(); 1939 } 1940 1941 /* 1942 * The fast path for frequent and performance sensitive wrmsr emulation, 1943 * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces 1944 * the latency of virtual IPI by avoiding the expensive bits of transitioning 1945 * from guest to host, e.g. 
reacquiring KVM's SRCU lock. In contrast to the 1946 * other cases which must be called after interrupts are enabled on the host. 1947 */ 1948 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data) 1949 { 1950 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) 1951 return 1; 1952 1953 if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) && 1954 ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) && 1955 ((data & APIC_MODE_MASK) == APIC_DM_FIXED) && 1956 ((u32)(data >> 32) != X2APIC_BROADCAST)) { 1957 1958 data &= ~(1 << 12); 1959 kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32)); 1960 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32)); 1961 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR, (u32)data); 1962 trace_kvm_apic_write(APIC_ICR, (u32)data); 1963 return 0; 1964 } 1965 1966 return 1; 1967 } 1968 1969 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data) 1970 { 1971 if (!kvm_can_use_hv_timer(vcpu)) 1972 return 1; 1973 1974 kvm_set_lapic_tscdeadline_msr(vcpu, data); 1975 return 0; 1976 } 1977 1978 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) 1979 { 1980 u32 msr = kvm_rcx_read(vcpu); 1981 u64 data; 1982 fastpath_t ret = EXIT_FASTPATH_NONE; 1983 1984 switch (msr) { 1985 case APIC_BASE_MSR + (APIC_ICR >> 4): 1986 data = kvm_read_edx_eax(vcpu); 1987 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) { 1988 kvm_skip_emulated_instruction(vcpu); 1989 ret = EXIT_FASTPATH_EXIT_HANDLED; 1990 } 1991 break; 1992 case MSR_IA32_TSC_DEADLINE: 1993 data = kvm_read_edx_eax(vcpu); 1994 if (!handle_fastpath_set_tscdeadline(vcpu, data)) { 1995 kvm_skip_emulated_instruction(vcpu); 1996 ret = EXIT_FASTPATH_REENTER_GUEST; 1997 } 1998 break; 1999 default: 2000 break; 2001 } 2002 2003 if (ret != EXIT_FASTPATH_NONE) 2004 trace_kvm_msr_write(msr, data); 2005 2006 return ret; 2007 } 2008 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff); 2009 2010 /* 2011 * Adapt set_msr() to msr_io()'s calling convention 2012 */ 2013 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 2014 { 2015 return kvm_get_msr_ignored_check(vcpu, index, data, true); 2016 } 2017 2018 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 2019 { 2020 return kvm_set_msr_ignored_check(vcpu, index, *data, true); 2021 } 2022 2023 #ifdef CONFIG_X86_64 2024 struct pvclock_clock { 2025 int vclock_mode; 2026 u64 cycle_last; 2027 u64 mask; 2028 u32 mult; 2029 u32 shift; 2030 u64 base_cycles; 2031 u64 offset; 2032 }; 2033 2034 struct pvclock_gtod_data { 2035 seqcount_t seq; 2036 2037 struct pvclock_clock clock; /* extract of a clocksource struct */ 2038 struct pvclock_clock raw_clock; /* extract of a clocksource struct */ 2039 2040 ktime_t offs_boot; 2041 u64 wall_time_sec; 2042 }; 2043 2044 static struct pvclock_gtod_data pvclock_gtod_data; 2045 2046 static void update_pvclock_gtod(struct timekeeper *tk) 2047 { 2048 struct pvclock_gtod_data *vdata = &pvclock_gtod_data; 2049 2050 write_seqcount_begin(&vdata->seq); 2051 2052 /* copy pvclock gtod data */ 2053 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; 2054 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; 2055 vdata->clock.mask = tk->tkr_mono.mask; 2056 vdata->clock.mult = tk->tkr_mono.mult; 2057 vdata->clock.shift = tk->tkr_mono.shift; 2058 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; 2059 vdata->clock.offset = tk->tkr_mono.base; 2060 2061 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; 2062 
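	/*
	 * Reader side (a sketch of the existing do_monotonic_raw() and
	 * do_realtime() consumers further down, not new behaviour): both
	 * the mono copy above and the raw copy being filled in here are
	 * used as
	 *
	 *	ns = base_cycles + ((tsc - cycle_last) & mask) * mult;
	 *	ns >>= shift;
	 *
	 * after which the raw/boot path adds offset + offs_boot, while the
	 * realtime path adds wall_time_sec seconds instead.
	 */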
vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; 2063 vdata->raw_clock.mask = tk->tkr_raw.mask; 2064 vdata->raw_clock.mult = tk->tkr_raw.mult; 2065 vdata->raw_clock.shift = tk->tkr_raw.shift; 2066 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; 2067 vdata->raw_clock.offset = tk->tkr_raw.base; 2068 2069 vdata->wall_time_sec = tk->xtime_sec; 2070 2071 vdata->offs_boot = tk->offs_boot; 2072 2073 write_seqcount_end(&vdata->seq); 2074 } 2075 2076 static s64 get_kvmclock_base_ns(void) 2077 { 2078 /* Count up from boot time, but with the frequency of the raw clock. */ 2079 return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot)); 2080 } 2081 #else 2082 static s64 get_kvmclock_base_ns(void) 2083 { 2084 /* Master clock not used, so we can just use CLOCK_BOOTTIME. */ 2085 return ktime_get_boottime_ns(); 2086 } 2087 #endif 2088 2089 void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs) 2090 { 2091 int version; 2092 int r; 2093 struct pvclock_wall_clock wc; 2094 u32 wc_sec_hi; 2095 u64 wall_nsec; 2096 2097 if (!wall_clock) 2098 return; 2099 2100 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); 2101 if (r) 2102 return; 2103 2104 if (version & 1) 2105 ++version; /* first time write, random junk */ 2106 2107 ++version; 2108 2109 if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version))) 2110 return; 2111 2112 /* 2113 * The guest calculates current wall clock time by adding 2114 * system time (updated by kvm_guest_time_update below) to the 2115 * wall clock specified here. We do the reverse here. 2116 */ 2117 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); 2118 2119 wc.nsec = do_div(wall_nsec, 1000000000); 2120 wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */ 2121 wc.version = version; 2122 2123 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); 2124 2125 if (sec_hi_ofs) { 2126 wc_sec_hi = wall_nsec >> 32; 2127 kvm_write_guest(kvm, wall_clock + sec_hi_ofs, 2128 &wc_sec_hi, sizeof(wc_sec_hi)); 2129 } 2130 2131 version++; 2132 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); 2133 } 2134 2135 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, 2136 bool old_msr, bool host_initiated) 2137 { 2138 struct kvm_arch *ka = &vcpu->kvm->arch; 2139 2140 if (vcpu->vcpu_id == 0 && !host_initiated) { 2141 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) 2142 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2143 2144 ka->boot_vcpu_runs_old_kvmclock = old_msr; 2145 } 2146 2147 vcpu->arch.time = system_time; 2148 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 2149 2150 /* we verify if the enable bit is set... 
*/ 2151 vcpu->arch.pv_time_enabled = false; 2152 if (!(system_time & 1)) 2153 return; 2154 2155 if (!kvm_gfn_to_hva_cache_init(vcpu->kvm, 2156 &vcpu->arch.pv_time, system_time & ~1ULL, 2157 sizeof(struct pvclock_vcpu_time_info))) 2158 vcpu->arch.pv_time_enabled = true; 2159 2160 return; 2161 } 2162 2163 static uint32_t div_frac(uint32_t dividend, uint32_t divisor) 2164 { 2165 do_shl32_div32(dividend, divisor); 2166 return dividend; 2167 } 2168 2169 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz, 2170 s8 *pshift, u32 *pmultiplier) 2171 { 2172 uint64_t scaled64; 2173 int32_t shift = 0; 2174 uint64_t tps64; 2175 uint32_t tps32; 2176 2177 tps64 = base_hz; 2178 scaled64 = scaled_hz; 2179 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { 2180 tps64 >>= 1; 2181 shift--; 2182 } 2183 2184 tps32 = (uint32_t)tps64; 2185 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { 2186 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) 2187 scaled64 >>= 1; 2188 else 2189 tps32 <<= 1; 2190 shift++; 2191 } 2192 2193 *pshift = shift; 2194 *pmultiplier = div_frac(scaled64, tps32); 2195 } 2196 2197 #ifdef CONFIG_X86_64 2198 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); 2199 #endif 2200 2201 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); 2202 static unsigned long max_tsc_khz; 2203 2204 static u32 adjust_tsc_khz(u32 khz, s32 ppm) 2205 { 2206 u64 v = (u64)khz * (1000000 + ppm); 2207 do_div(v, 1000000); 2208 return v; 2209 } 2210 2211 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier); 2212 2213 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) 2214 { 2215 u64 ratio; 2216 2217 /* Guest TSC same frequency as host TSC? */ 2218 if (!scale) { 2219 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio); 2220 return 0; 2221 } 2222 2223 /* TSC scaling supported? */ 2224 if (!kvm_has_tsc_control) { 2225 if (user_tsc_khz > tsc_khz) { 2226 vcpu->arch.tsc_catchup = 1; 2227 vcpu->arch.tsc_always_catchup = 1; 2228 return 0; 2229 } else { 2230 pr_warn_ratelimited("user requested TSC rate below hardware speed\n"); 2231 return -1; 2232 } 2233 } 2234 2235 /* TSC scaling required - calculate ratio */ 2236 ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits, 2237 user_tsc_khz, tsc_khz); 2238 2239 if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) { 2240 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", 2241 user_tsc_khz); 2242 return -1; 2243 } 2244 2245 kvm_vcpu_write_tsc_multiplier(vcpu, ratio); 2246 return 0; 2247 } 2248 2249 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) 2250 { 2251 u32 thresh_lo, thresh_hi; 2252 int use_scaling = 0; 2253 2254 /* tsc_khz can be zero if TSC calibration fails */ 2255 if (user_tsc_khz == 0) { 2256 /* set tsc_scaling_ratio to a safe value */ 2257 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio); 2258 return -1; 2259 } 2260 2261 /* Compute a scale to convert nanoseconds in TSC cycles */ 2262 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC, 2263 &vcpu->arch.virtual_tsc_shift, 2264 &vcpu->arch.virtual_tsc_mult); 2265 vcpu->arch.virtual_tsc_khz = user_tsc_khz; 2266 2267 /* 2268 * Compute the variation in TSC rate which is acceptable 2269 * within the range of tolerance and decide if the 2270 * rate being applied is within that bounds of the hardware 2271 * rate. If so, no scaling or compensation need be done. 
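	 *
	 * Worked example (illustrative numbers): with tsc_tolerance_ppm at
	 * its default of 250 and a host tsc_khz of 2496000, adjust_tsc_khz()
	 * yields
	 *
	 *	thresh_lo = 2496000 * (1000000 - 250) / 1000000 = 2495376 kHz
	 *	thresh_hi = 2496000 * (1000000 + 250) / 1000000 = 2496624 kHz
	 *
	 * so any user_tsc_khz outside [2495376, 2496624] goes to
	 * set_tsc_khz() with scaling requested (or catchup if the hardware
	 * cannot scale).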
2272 */ 2273 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); 2274 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); 2275 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) { 2276 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi); 2277 use_scaling = 1; 2278 } 2279 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); 2280 } 2281 2282 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) 2283 { 2284 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, 2285 vcpu->arch.virtual_tsc_mult, 2286 vcpu->arch.virtual_tsc_shift); 2287 tsc += vcpu->arch.this_tsc_write; 2288 return tsc; 2289 } 2290 2291 static inline int gtod_is_based_on_tsc(int mode) 2292 { 2293 return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK; 2294 } 2295 2296 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) 2297 { 2298 #ifdef CONFIG_X86_64 2299 bool vcpus_matched; 2300 struct kvm_arch *ka = &vcpu->kvm->arch; 2301 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2302 2303 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 2304 atomic_read(&vcpu->kvm->online_vcpus)); 2305 2306 /* 2307 * Once the masterclock is enabled, always perform request in 2308 * order to update it. 2309 * 2310 * In order to enable masterclock, the host clocksource must be TSC 2311 * and the vcpus need to have matched TSCs. When that happens, 2312 * perform request to enable masterclock. 2313 */ 2314 if (ka->use_master_clock || 2315 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) 2316 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2317 2318 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, 2319 atomic_read(&vcpu->kvm->online_vcpus), 2320 ka->use_master_clock, gtod->clock.vclock_mode); 2321 #endif 2322 } 2323 2324 /* 2325 * Multiply tsc by a fixed point number represented by ratio. 2326 * 2327 * The most significant 64-N bits (mult) of ratio represent the 2328 * integral part of the fixed point number; the remaining N bits 2329 * (frac) represent the fractional part, ie. ratio represents a fixed 2330 * point number (mult + frac * 2^(-N)). 2331 * 2332 * N equals to kvm_tsc_scaling_ratio_frac_bits. 
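 *
 * Worked example (illustrative; the actual value of N is set by the
 * vendor module, e.g. 48 fractional bits on VMX and 32 on SVM): a guest
 * meant to run at half the host TSC frequency gets
 *
 *	ratio = (1ULL << N) / 2 = 1ULL << (N - 1)
 *
 * and __scale_tsc() below computes (tsc * ratio) >> N with a 128-bit
 * intermediate via mul_u64_u64_shr().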
2333 */ 2334 static inline u64 __scale_tsc(u64 ratio, u64 tsc) 2335 { 2336 return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits); 2337 } 2338 2339 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio) 2340 { 2341 u64 _tsc = tsc; 2342 2343 if (ratio != kvm_default_tsc_scaling_ratio) 2344 _tsc = __scale_tsc(ratio, tsc); 2345 2346 return _tsc; 2347 } 2348 EXPORT_SYMBOL_GPL(kvm_scale_tsc); 2349 2350 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) 2351 { 2352 u64 tsc; 2353 2354 tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); 2355 2356 return target_tsc - tsc; 2357 } 2358 2359 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) 2360 { 2361 return vcpu->arch.l1_tsc_offset + 2362 kvm_scale_tsc(vcpu, host_tsc, vcpu->arch.l1_tsc_scaling_ratio); 2363 } 2364 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 2365 2366 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier) 2367 { 2368 u64 nested_offset; 2369 2370 if (l2_multiplier == kvm_default_tsc_scaling_ratio) 2371 nested_offset = l1_offset; 2372 else 2373 nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier, 2374 kvm_tsc_scaling_ratio_frac_bits); 2375 2376 nested_offset += l2_offset; 2377 return nested_offset; 2378 } 2379 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset); 2380 2381 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier) 2382 { 2383 if (l2_multiplier != kvm_default_tsc_scaling_ratio) 2384 return mul_u64_u64_shr(l1_multiplier, l2_multiplier, 2385 kvm_tsc_scaling_ratio_frac_bits); 2386 2387 return l1_multiplier; 2388 } 2389 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier); 2390 2391 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset) 2392 { 2393 trace_kvm_write_tsc_offset(vcpu->vcpu_id, 2394 vcpu->arch.l1_tsc_offset, 2395 l1_offset); 2396 2397 vcpu->arch.l1_tsc_offset = l1_offset; 2398 2399 /* 2400 * If we are here because L1 chose not to trap WRMSR to TSC then 2401 * according to the spec this should set L1's TSC (as opposed to 2402 * setting L1's offset for L2). 2403 */ 2404 if (is_guest_mode(vcpu)) 2405 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( 2406 l1_offset, 2407 static_call(kvm_x86_get_l2_tsc_offset)(vcpu), 2408 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2409 else 2410 vcpu->arch.tsc_offset = l1_offset; 2411 2412 static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset); 2413 } 2414 2415 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier) 2416 { 2417 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; 2418 2419 /* Userspace is changing the multiplier while L2 is active */ 2420 if (is_guest_mode(vcpu)) 2421 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( 2422 l1_multiplier, 2423 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2424 else 2425 vcpu->arch.tsc_scaling_ratio = l1_multiplier; 2426 2427 if (kvm_has_tsc_control) 2428 static_call(kvm_x86_write_tsc_multiplier)( 2429 vcpu, vcpu->arch.tsc_scaling_ratio); 2430 } 2431 2432 static inline bool kvm_check_tsc_unstable(void) 2433 { 2434 #ifdef CONFIG_X86_64 2435 /* 2436 * TSC is marked unstable when we're running on Hyper-V, 2437 * 'TSC page' clocksource is good. 
2438 */ 2439 if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK) 2440 return false; 2441 #endif 2442 return check_tsc_unstable(); 2443 } 2444 2445 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) 2446 { 2447 struct kvm *kvm = vcpu->kvm; 2448 u64 offset, ns, elapsed; 2449 unsigned long flags; 2450 bool matched; 2451 bool already_matched; 2452 bool synchronizing = false; 2453 2454 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 2455 offset = kvm_compute_l1_tsc_offset(vcpu, data); 2456 ns = get_kvmclock_base_ns(); 2457 elapsed = ns - kvm->arch.last_tsc_nsec; 2458 2459 if (vcpu->arch.virtual_tsc_khz) { 2460 if (data == 0) { 2461 /* 2462 * detection of vcpu initialization -- need to sync 2463 * with other vCPUs. This particularly helps to keep 2464 * kvm_clock stable after CPU hotplug 2465 */ 2466 synchronizing = true; 2467 } else { 2468 u64 tsc_exp = kvm->arch.last_tsc_write + 2469 nsec_to_cycles(vcpu, elapsed); 2470 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; 2471 /* 2472 * Special case: TSC write with a small delta (1 second) 2473 * of virtual cycle time against real time is 2474 * interpreted as an attempt to synchronize the CPU. 2475 */ 2476 synchronizing = data < tsc_exp + tsc_hz && 2477 data + tsc_hz > tsc_exp; 2478 } 2479 } 2480 2481 /* 2482 * For a reliable TSC, we can match TSC offsets, and for an unstable 2483 * TSC, we add elapsed time in this computation. We could let the 2484 * compensation code attempt to catch up if we fall behind, but 2485 * it's better to try to match offsets from the beginning. 2486 */ 2487 if (synchronizing && 2488 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { 2489 if (!kvm_check_tsc_unstable()) { 2490 offset = kvm->arch.cur_tsc_offset; 2491 } else { 2492 u64 delta = nsec_to_cycles(vcpu, elapsed); 2493 data += delta; 2494 offset = kvm_compute_l1_tsc_offset(vcpu, data); 2495 } 2496 matched = true; 2497 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); 2498 } else { 2499 /* 2500 * We split periods of matched TSC writes into generations. 2501 * For each generation, we track the original measured 2502 * nanosecond time, offset, and write, so if TSCs are in 2503 * sync, we can match exact offset, and if not, we can match 2504 * exact software computation in compute_guest_tsc() 2505 * 2506 * These values are tracked in kvm->arch.cur_xxx variables. 2507 */ 2508 kvm->arch.cur_tsc_generation++; 2509 kvm->arch.cur_tsc_nsec = ns; 2510 kvm->arch.cur_tsc_write = data; 2511 kvm->arch.cur_tsc_offset = offset; 2512 matched = false; 2513 } 2514 2515 /* 2516 * We also track th most recent recorded KHZ, write and time to 2517 * allow the matching interval to be extended at each write. 
2518 */ 2519 kvm->arch.last_tsc_nsec = ns; 2520 kvm->arch.last_tsc_write = data; 2521 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; 2522 2523 vcpu->arch.last_guest_tsc = data; 2524 2525 /* Keep track of which generation this VCPU has synchronized to */ 2526 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; 2527 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; 2528 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; 2529 2530 kvm_vcpu_write_tsc_offset(vcpu, offset); 2531 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 2532 2533 spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags); 2534 if (!matched) { 2535 kvm->arch.nr_vcpus_matched_tsc = 0; 2536 } else if (!already_matched) { 2537 kvm->arch.nr_vcpus_matched_tsc++; 2538 } 2539 2540 kvm_track_tsc_matching(vcpu); 2541 spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags); 2542 } 2543 2544 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, 2545 s64 adjustment) 2546 { 2547 u64 tsc_offset = vcpu->arch.l1_tsc_offset; 2548 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); 2549 } 2550 2551 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) 2552 { 2553 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) 2554 WARN_ON(adjustment < 0); 2555 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment, 2556 vcpu->arch.l1_tsc_scaling_ratio); 2557 adjust_tsc_offset_guest(vcpu, adjustment); 2558 } 2559 2560 #ifdef CONFIG_X86_64 2561 2562 static u64 read_tsc(void) 2563 { 2564 u64 ret = (u64)rdtsc_ordered(); 2565 u64 last = pvclock_gtod_data.clock.cycle_last; 2566 2567 if (likely(ret >= last)) 2568 return ret; 2569 2570 /* 2571 * GCC likes to generate cmov here, but this branch is extremely 2572 * predictable (it's just a function of time and the likely is 2573 * very likely) and there's a data dependence, so force GCC 2574 * to generate a branch instead. I don't barrier() because 2575 * we don't actually need a barrier, and if this function 2576 * ever gets inlined it will generate worse code. 
2577 */ 2578 asm volatile (""); 2579 return last; 2580 } 2581 2582 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp, 2583 int *mode) 2584 { 2585 long v; 2586 u64 tsc_pg_val; 2587 2588 switch (clock->vclock_mode) { 2589 case VDSO_CLOCKMODE_HVCLOCK: 2590 tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(), 2591 tsc_timestamp); 2592 if (tsc_pg_val != U64_MAX) { 2593 /* TSC page valid */ 2594 *mode = VDSO_CLOCKMODE_HVCLOCK; 2595 v = (tsc_pg_val - clock->cycle_last) & 2596 clock->mask; 2597 } else { 2598 /* TSC page invalid */ 2599 *mode = VDSO_CLOCKMODE_NONE; 2600 } 2601 break; 2602 case VDSO_CLOCKMODE_TSC: 2603 *mode = VDSO_CLOCKMODE_TSC; 2604 *tsc_timestamp = read_tsc(); 2605 v = (*tsc_timestamp - clock->cycle_last) & 2606 clock->mask; 2607 break; 2608 default: 2609 *mode = VDSO_CLOCKMODE_NONE; 2610 } 2611 2612 if (*mode == VDSO_CLOCKMODE_NONE) 2613 *tsc_timestamp = v = 0; 2614 2615 return v * clock->mult; 2616 } 2617 2618 static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp) 2619 { 2620 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2621 unsigned long seq; 2622 int mode; 2623 u64 ns; 2624 2625 do { 2626 seq = read_seqcount_begin(>od->seq); 2627 ns = gtod->raw_clock.base_cycles; 2628 ns += vgettsc(>od->raw_clock, tsc_timestamp, &mode); 2629 ns >>= gtod->raw_clock.shift; 2630 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot)); 2631 } while (unlikely(read_seqcount_retry(>od->seq, seq))); 2632 *t = ns; 2633 2634 return mode; 2635 } 2636 2637 static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp) 2638 { 2639 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2640 unsigned long seq; 2641 int mode; 2642 u64 ns; 2643 2644 do { 2645 seq = read_seqcount_begin(>od->seq); 2646 ts->tv_sec = gtod->wall_time_sec; 2647 ns = gtod->clock.base_cycles; 2648 ns += vgettsc(>od->clock, tsc_timestamp, &mode); 2649 ns >>= gtod->clock.shift; 2650 } while (unlikely(read_seqcount_retry(>od->seq, seq))); 2651 2652 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); 2653 ts->tv_nsec = ns; 2654 2655 return mode; 2656 } 2657 2658 /* returns true if host is using TSC based clocksource */ 2659 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp) 2660 { 2661 /* checked again under seqlock below */ 2662 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) 2663 return false; 2664 2665 return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns, 2666 tsc_timestamp)); 2667 } 2668 2669 /* returns true if host is using TSC based clocksource */ 2670 static bool kvm_get_walltime_and_clockread(struct timespec64 *ts, 2671 u64 *tsc_timestamp) 2672 { 2673 /* checked again under seqlock below */ 2674 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) 2675 return false; 2676 2677 return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp)); 2678 } 2679 #endif 2680 2681 /* 2682 * 2683 * Assuming a stable TSC across physical CPUS, and a stable TSC 2684 * across virtual CPUs, the following condition is possible. 2685 * Each numbered line represents an event visible to both 2686 * CPUs at the next numbered event. 2687 * 2688 * "timespecX" represents host monotonic time. "tscX" represents 2689 * RDTSC value. 2690 * 2691 * VCPU0 on CPU0 | VCPU1 on CPU1 2692 * 2693 * 1. read timespec0,tsc0 2694 * 2. | timespec1 = timespec0 + N 2695 * | tsc1 = tsc0 + M 2696 * 3. transition to guest | transition to guest 2697 * 4. ret0 = timespec0 + (rdtsc - tsc0) | 2698 * 5. 
| ret1 = timespec1 + (rdtsc - tsc1) 2699 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M)) 2700 * 2701 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity: 2702 * 2703 * - ret0 < ret1 2704 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M)) 2705 * ... 2706 * - 0 < N - M => M < N 2707 * 2708 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not 2709 * always the case (the difference between two distinct xtime instances 2710 * might be smaller then the difference between corresponding TSC reads, 2711 * when updating guest vcpus pvclock areas). 2712 * 2713 * To avoid that problem, do not allow visibility of distinct 2714 * system_timestamp/tsc_timestamp values simultaneously: use a master 2715 * copy of host monotonic time values. Update that master copy 2716 * in lockstep. 2717 * 2718 * Rely on synchronization of host TSCs and guest TSCs for monotonicity. 2719 * 2720 */ 2721 2722 static void pvclock_update_vm_gtod_copy(struct kvm *kvm) 2723 { 2724 #ifdef CONFIG_X86_64 2725 struct kvm_arch *ka = &kvm->arch; 2726 int vclock_mode; 2727 bool host_tsc_clocksource, vcpus_matched; 2728 2729 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 2730 atomic_read(&kvm->online_vcpus)); 2731 2732 /* 2733 * If the host uses TSC clock, then passthrough TSC as stable 2734 * to the guest. 2735 */ 2736 host_tsc_clocksource = kvm_get_time_and_clockread( 2737 &ka->master_kernel_ns, 2738 &ka->master_cycle_now); 2739 2740 ka->use_master_clock = host_tsc_clocksource && vcpus_matched 2741 && !ka->backwards_tsc_observed 2742 && !ka->boot_vcpu_runs_old_kvmclock; 2743 2744 if (ka->use_master_clock) 2745 atomic_set(&kvm_guest_has_master_clock, 1); 2746 2747 vclock_mode = pvclock_gtod_data.clock.vclock_mode; 2748 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, 2749 vcpus_matched); 2750 #endif 2751 } 2752 2753 void kvm_make_mclock_inprogress_request(struct kvm *kvm) 2754 { 2755 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); 2756 } 2757 2758 static void kvm_gen_update_masterclock(struct kvm *kvm) 2759 { 2760 #ifdef CONFIG_X86_64 2761 int i; 2762 struct kvm_vcpu *vcpu; 2763 struct kvm_arch *ka = &kvm->arch; 2764 unsigned long flags; 2765 2766 kvm_hv_invalidate_tsc_page(kvm); 2767 2768 kvm_make_mclock_inprogress_request(kvm); 2769 2770 /* no guest entries from this point */ 2771 spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); 2772 pvclock_update_vm_gtod_copy(kvm); 2773 spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); 2774 2775 kvm_for_each_vcpu(i, vcpu, kvm) 2776 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2777 2778 /* guest entries allowed */ 2779 kvm_for_each_vcpu(i, vcpu, kvm) 2780 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); 2781 #endif 2782 } 2783 2784 u64 get_kvmclock_ns(struct kvm *kvm) 2785 { 2786 struct kvm_arch *ka = &kvm->arch; 2787 struct pvclock_vcpu_time_info hv_clock; 2788 unsigned long flags; 2789 u64 ret; 2790 2791 spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); 2792 if (!ka->use_master_clock) { 2793 spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); 2794 return get_kvmclock_base_ns() + ka->kvmclock_offset; 2795 } 2796 2797 hv_clock.tsc_timestamp = ka->master_cycle_now; 2798 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 2799 spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); 2800 2801 /* both __this_cpu_read() and rdtsc() should be on the same cpu */ 2802 get_cpu(); 2803 2804 if (__this_cpu_read(cpu_tsc_khz)) { 2805 kvm_get_time_scale(NSEC_PER_SEC, 
__this_cpu_read(cpu_tsc_khz) * 1000LL, 2806 &hv_clock.tsc_shift, 2807 &hv_clock.tsc_to_system_mul); 2808 ret = __pvclock_read_cycles(&hv_clock, rdtsc()); 2809 } else 2810 ret = get_kvmclock_base_ns() + ka->kvmclock_offset; 2811 2812 put_cpu(); 2813 2814 return ret; 2815 } 2816 2817 static void kvm_setup_pvclock_page(struct kvm_vcpu *v, 2818 struct gfn_to_hva_cache *cache, 2819 unsigned int offset) 2820 { 2821 struct kvm_vcpu_arch *vcpu = &v->arch; 2822 struct pvclock_vcpu_time_info guest_hv_clock; 2823 2824 if (unlikely(kvm_read_guest_offset_cached(v->kvm, cache, 2825 &guest_hv_clock, offset, sizeof(guest_hv_clock)))) 2826 return; 2827 2828 /* This VCPU is paused, but it's legal for a guest to read another 2829 * VCPU's kvmclock, so we really have to follow the specification where 2830 * it says that version is odd if data is being modified, and even after 2831 * it is consistent. 2832 * 2833 * Version field updates must be kept separate. This is because 2834 * kvm_write_guest_cached might use a "rep movs" instruction, and 2835 * writes within a string instruction are weakly ordered. So there 2836 * are three writes overall. 2837 * 2838 * As a small optimization, only write the version field in the first 2839 * and third write. The vcpu->pv_time cache is still valid, because the 2840 * version field is the first in the struct. 2841 */ 2842 BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); 2843 2844 if (guest_hv_clock.version & 1) 2845 ++guest_hv_clock.version; /* first time write, random junk */ 2846 2847 vcpu->hv_clock.version = guest_hv_clock.version + 1; 2848 kvm_write_guest_offset_cached(v->kvm, cache, 2849 &vcpu->hv_clock, offset, 2850 sizeof(vcpu->hv_clock.version)); 2851 2852 smp_wmb(); 2853 2854 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 2855 vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); 2856 2857 if (vcpu->pvclock_set_guest_stopped_request) { 2858 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; 2859 vcpu->pvclock_set_guest_stopped_request = false; 2860 } 2861 2862 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); 2863 2864 kvm_write_guest_offset_cached(v->kvm, cache, 2865 &vcpu->hv_clock, offset, 2866 sizeof(vcpu->hv_clock)); 2867 2868 smp_wmb(); 2869 2870 vcpu->hv_clock.version++; 2871 kvm_write_guest_offset_cached(v->kvm, cache, 2872 &vcpu->hv_clock, offset, 2873 sizeof(vcpu->hv_clock.version)); 2874 } 2875 2876 static int kvm_guest_time_update(struct kvm_vcpu *v) 2877 { 2878 unsigned long flags, tgt_tsc_khz; 2879 struct kvm_vcpu_arch *vcpu = &v->arch; 2880 struct kvm_arch *ka = &v->kvm->arch; 2881 s64 kernel_ns; 2882 u64 tsc_timestamp, host_tsc; 2883 u8 pvclock_flags; 2884 bool use_master_clock; 2885 2886 kernel_ns = 0; 2887 host_tsc = 0; 2888 2889 /* 2890 * If the host uses TSC clock, then passthrough TSC as stable 2891 * to the guest. 
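	 *
	 * The hv_clock fields filled in further down follow the pvclock
	 * ABI; the guest side of that contract is roughly (a sketch of the
	 * standard pvclock read, not new behaviour)
	 *
	 *	delta = rdtsc() - tsc_timestamp;
	 *	if (tsc_shift >= 0)
	 *		delta <<= tsc_shift;
	 *	else
	 *		delta >>= -tsc_shift;
	 *	ns = (delta * tsc_to_system_mul) >> 32;
	 *	now = system_time + ns;
	 *
	 * so tsc_timestamp and system_time must describe the same instant,
	 * which is what the master clock snapshot read below provides.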
2892 */ 2893 spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); 2894 use_master_clock = ka->use_master_clock; 2895 if (use_master_clock) { 2896 host_tsc = ka->master_cycle_now; 2897 kernel_ns = ka->master_kernel_ns; 2898 } 2899 spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); 2900 2901 /* Keep irq disabled to prevent changes to the clock */ 2902 local_irq_save(flags); 2903 tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz); 2904 if (unlikely(tgt_tsc_khz == 0)) { 2905 local_irq_restore(flags); 2906 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 2907 return 1; 2908 } 2909 if (!use_master_clock) { 2910 host_tsc = rdtsc(); 2911 kernel_ns = get_kvmclock_base_ns(); 2912 } 2913 2914 tsc_timestamp = kvm_read_l1_tsc(v, host_tsc); 2915 2916 /* 2917 * We may have to catch up the TSC to match elapsed wall clock 2918 * time for two reasons, even if kvmclock is used. 2919 * 1) CPU could have been running below the maximum TSC rate 2920 * 2) Broken TSC compensation resets the base at each VCPU 2921 * entry to avoid unknown leaps of TSC even when running 2922 * again on the same CPU. This may cause apparent elapsed 2923 * time to disappear, and the guest to stand still or run 2924 * very slowly. 2925 */ 2926 if (vcpu->tsc_catchup) { 2927 u64 tsc = compute_guest_tsc(v, kernel_ns); 2928 if (tsc > tsc_timestamp) { 2929 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); 2930 tsc_timestamp = tsc; 2931 } 2932 } 2933 2934 local_irq_restore(flags); 2935 2936 /* With all the info we got, fill in the values */ 2937 2938 if (kvm_has_tsc_control) 2939 tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz, 2940 v->arch.l1_tsc_scaling_ratio); 2941 2942 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { 2943 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, 2944 &vcpu->hv_clock.tsc_shift, 2945 &vcpu->hv_clock.tsc_to_system_mul); 2946 vcpu->hw_tsc_khz = tgt_tsc_khz; 2947 } 2948 2949 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; 2950 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; 2951 vcpu->last_guest_tsc = tsc_timestamp; 2952 2953 /* If the host uses TSC clocksource, then it is stable */ 2954 pvclock_flags = 0; 2955 if (use_master_clock) 2956 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; 2957 2958 vcpu->hv_clock.flags = pvclock_flags; 2959 2960 if (vcpu->pv_time_enabled) 2961 kvm_setup_pvclock_page(v, &vcpu->pv_time, 0); 2962 if (vcpu->xen.vcpu_info_set) 2963 kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache, 2964 offsetof(struct compat_vcpu_info, time)); 2965 if (vcpu->xen.vcpu_time_info_set) 2966 kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0); 2967 if (v == kvm_get_vcpu(v->kvm, 0)) 2968 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); 2969 return 0; 2970 } 2971 2972 /* 2973 * kvmclock updates which are isolated to a given vcpu, such as 2974 * vcpu->cpu migration, should not allow system_timestamp from 2975 * the rest of the vcpus to remain static. Otherwise ntp frequency 2976 * correction applies to one vcpu's system_timestamp but not 2977 * the others. 2978 * 2979 * So in those cases, request a kvmclock update for all vcpus. 2980 * We need to rate-limit these requests though, as they can 2981 * considerably slow guests that have a large number of vcpus. 2982 * The time for a remote vcpu to update its kvmclock is bound 2983 * by the delay we use to rate-limit the updates. 
2984 */ 2985 2986 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) 2987 2988 static void kvmclock_update_fn(struct work_struct *work) 2989 { 2990 int i; 2991 struct delayed_work *dwork = to_delayed_work(work); 2992 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 2993 kvmclock_update_work); 2994 struct kvm *kvm = container_of(ka, struct kvm, arch); 2995 struct kvm_vcpu *vcpu; 2996 2997 kvm_for_each_vcpu(i, vcpu, kvm) { 2998 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2999 kvm_vcpu_kick(vcpu); 3000 } 3001 } 3002 3003 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) 3004 { 3005 struct kvm *kvm = v->kvm; 3006 3007 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3008 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 3009 KVMCLOCK_UPDATE_DELAY); 3010 } 3011 3012 #define KVMCLOCK_SYNC_PERIOD (300 * HZ) 3013 3014 static void kvmclock_sync_fn(struct work_struct *work) 3015 { 3016 struct delayed_work *dwork = to_delayed_work(work); 3017 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3018 kvmclock_sync_work); 3019 struct kvm *kvm = container_of(ka, struct kvm, arch); 3020 3021 if (!kvmclock_periodic_sync) 3022 return; 3023 3024 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); 3025 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 3026 KVMCLOCK_SYNC_PERIOD); 3027 } 3028 3029 /* 3030 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP. 3031 */ 3032 static bool can_set_mci_status(struct kvm_vcpu *vcpu) 3033 { 3034 /* McStatusWrEn enabled? */ 3035 if (guest_cpuid_is_amd_or_hygon(vcpu)) 3036 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); 3037 3038 return false; 3039 } 3040 3041 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3042 { 3043 u64 mcg_cap = vcpu->arch.mcg_cap; 3044 unsigned bank_num = mcg_cap & 0xff; 3045 u32 msr = msr_info->index; 3046 u64 data = msr_info->data; 3047 3048 switch (msr) { 3049 case MSR_IA32_MCG_STATUS: 3050 vcpu->arch.mcg_status = data; 3051 break; 3052 case MSR_IA32_MCG_CTL: 3053 if (!(mcg_cap & MCG_CTL_P) && 3054 (data || !msr_info->host_initiated)) 3055 return 1; 3056 if (data != 0 && data != ~(u64)0) 3057 return 1; 3058 vcpu->arch.mcg_ctl = data; 3059 break; 3060 default: 3061 if (msr >= MSR_IA32_MC0_CTL && 3062 msr < MSR_IA32_MCx_CTL(bank_num)) { 3063 u32 offset = array_index_nospec( 3064 msr - MSR_IA32_MC0_CTL, 3065 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); 3066 3067 /* only 0 or all 1s can be written to IA32_MCi_CTL 3068 * some Linux kernels though clear bit 10 in bank 4 to 3069 * workaround a BIOS/GART TBL issue on AMD K8s, ignore 3070 * this to avoid an uncatched #GP in the guest 3071 */ 3072 if ((offset & 0x3) == 0 && 3073 data != 0 && (data | (1 << 10)) != ~(u64)0) 3074 return -1; 3075 3076 /* MCi_STATUS */ 3077 if (!msr_info->host_initiated && 3078 (offset & 0x3) == 1 && data != 0) { 3079 if (!can_set_mci_status(vcpu)) 3080 return -1; 3081 } 3082 3083 vcpu->arch.mce_banks[offset] = data; 3084 break; 3085 } 3086 return 1; 3087 } 3088 return 0; 3089 } 3090 3091 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) 3092 { 3093 u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT; 3094 3095 return (vcpu->arch.apf.msr_en_val & mask) == mask; 3096 } 3097 3098 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) 3099 { 3100 gpa_t gpa = data & ~0x3f; 3101 3102 /* Bits 4:5 are reserved, Should be zero */ 3103 if (data & 0x30) 3104 return 1; 3105 3106 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) && 3107 (data & 
KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT)) 3108 return 1; 3109 3110 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) && 3111 (data & KVM_ASYNC_PF_DELIVERY_AS_INT)) 3112 return 1; 3113 3114 if (!lapic_in_kernel(vcpu)) 3115 return data ? 1 : 0; 3116 3117 vcpu->arch.apf.msr_en_val = data; 3118 3119 if (!kvm_pv_async_pf_enabled(vcpu)) { 3120 kvm_clear_async_pf_completion_queue(vcpu); 3121 kvm_async_pf_hash_reset(vcpu); 3122 return 0; 3123 } 3124 3125 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, 3126 sizeof(u64))) 3127 return 1; 3128 3129 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); 3130 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; 3131 3132 kvm_async_pf_wakeup_all(vcpu); 3133 3134 return 0; 3135 } 3136 3137 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data) 3138 { 3139 /* Bits 8-63 are reserved */ 3140 if (data >> 8) 3141 return 1; 3142 3143 if (!lapic_in_kernel(vcpu)) 3144 return 1; 3145 3146 vcpu->arch.apf.msr_int_val = data; 3147 3148 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; 3149 3150 return 0; 3151 } 3152 3153 static void kvmclock_reset(struct kvm_vcpu *vcpu) 3154 { 3155 vcpu->arch.pv_time_enabled = false; 3156 vcpu->arch.time = 0; 3157 } 3158 3159 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) 3160 { 3161 ++vcpu->stat.tlb_flush; 3162 static_call(kvm_x86_tlb_flush_all)(vcpu); 3163 } 3164 3165 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) 3166 { 3167 ++vcpu->stat.tlb_flush; 3168 3169 if (!tdp_enabled) { 3170 /* 3171 * A TLB flush on behalf of the guest is equivalent to 3172 * INVPCID(all), toggling CR4.PGE, etc., which requires 3173 * a forced sync of the shadow page tables. Unload the 3174 * entire MMU here and the subsequent load will sync the 3175 * shadow page tables, and also flush the TLB. 3176 */ 3177 kvm_mmu_unload(vcpu); 3178 return; 3179 } 3180 3181 static_call(kvm_x86_tlb_flush_guest)(vcpu); 3182 } 3183 3184 static void record_steal_time(struct kvm_vcpu *vcpu) 3185 { 3186 struct kvm_host_map map; 3187 struct kvm_steal_time *st; 3188 3189 if (kvm_xen_msr_enabled(vcpu->kvm)) { 3190 kvm_xen_runstate_set_running(vcpu); 3191 return; 3192 } 3193 3194 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 3195 return; 3196 3197 /* -EAGAIN is returned in atomic context so we can just return. */ 3198 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, 3199 &map, &vcpu->arch.st.cache, false)) 3200 return; 3201 3202 st = map.hva + 3203 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); 3204 3205 /* 3206 * Doing a TLB flush here, on the guest's behalf, can avoid 3207 * expensive IPIs. 
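	 *
	 * The handshake lives in the preempted byte of struct
	 * kvm_steal_time: the host sets KVM_VCPU_PREEMPTED when it
	 * schedules the vCPU out, and the guest's remote-flush path does
	 * roughly (guest-side sketch, not code in this file)
	 *
	 *	if (READ_ONCE(st->preempted) & KVM_VCPU_PREEMPTED)
	 *		set KVM_VCPU_FLUSH_TLB in st->preempted with cmpxchg;
	 *	else
	 *		send a real flush IPI;
	 *
	 * The xchg() below then observes KVM_VCPU_FLUSH_TLB and flushes on
	 * the guest's behalf before this vCPU runs again.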
3208 */ 3209 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { 3210 u8 st_preempted = xchg(&st->preempted, 0); 3211 3212 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, 3213 st_preempted & KVM_VCPU_FLUSH_TLB); 3214 if (st_preempted & KVM_VCPU_FLUSH_TLB) 3215 kvm_vcpu_flush_tlb_guest(vcpu); 3216 } else { 3217 st->preempted = 0; 3218 } 3219 3220 vcpu->arch.st.preempted = 0; 3221 3222 if (st->version & 1) 3223 st->version += 1; /* first time write, random junk */ 3224 3225 st->version += 1; 3226 3227 smp_wmb(); 3228 3229 st->steal += current->sched_info.run_delay - 3230 vcpu->arch.st.last_steal; 3231 vcpu->arch.st.last_steal = current->sched_info.run_delay; 3232 3233 smp_wmb(); 3234 3235 st->version += 1; 3236 3237 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false); 3238 } 3239 3240 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3241 { 3242 bool pr = false; 3243 u32 msr = msr_info->index; 3244 u64 data = msr_info->data; 3245 3246 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) 3247 return kvm_xen_write_hypercall_page(vcpu, data); 3248 3249 switch (msr) { 3250 case MSR_AMD64_NB_CFG: 3251 case MSR_IA32_UCODE_WRITE: 3252 case MSR_VM_HSAVE_PA: 3253 case MSR_AMD64_PATCH_LOADER: 3254 case MSR_AMD64_BU_CFG2: 3255 case MSR_AMD64_DC_CFG: 3256 case MSR_F15H_EX_CFG: 3257 break; 3258 3259 case MSR_IA32_UCODE_REV: 3260 if (msr_info->host_initiated) 3261 vcpu->arch.microcode_version = data; 3262 break; 3263 case MSR_IA32_ARCH_CAPABILITIES: 3264 if (!msr_info->host_initiated) 3265 return 1; 3266 vcpu->arch.arch_capabilities = data; 3267 break; 3268 case MSR_IA32_PERF_CAPABILITIES: { 3269 struct kvm_msr_entry msr_ent = {.index = msr, .data = 0}; 3270 3271 if (!msr_info->host_initiated) 3272 return 1; 3273 if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) && kvm_get_msr_feature(&msr_ent)) 3274 return 1; 3275 if (data & ~msr_ent.data) 3276 return 1; 3277 3278 vcpu->arch.perf_capabilities = data; 3279 3280 return 0; 3281 } 3282 case MSR_EFER: 3283 return set_efer(vcpu, msr_info); 3284 case MSR_K7_HWCR: 3285 data &= ~(u64)0x40; /* ignore flush filter disable */ 3286 data &= ~(u64)0x100; /* ignore ignne emulation enable */ 3287 data &= ~(u64)0x8; /* ignore TLB cache disable */ 3288 3289 /* Handle McStatusWrEn */ 3290 if (data == BIT_ULL(18)) { 3291 vcpu->arch.msr_hwcr = data; 3292 } else if (data != 0) { 3293 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", 3294 data); 3295 return 1; 3296 } 3297 break; 3298 case MSR_FAM10H_MMIO_CONF_BASE: 3299 if (data != 0) { 3300 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " 3301 "0x%llx\n", data); 3302 return 1; 3303 } 3304 break; 3305 case 0x200 ... 0x2ff: 3306 return kvm_mtrr_set_msr(vcpu, msr, data); 3307 case MSR_IA32_APICBASE: 3308 return kvm_set_apic_base(vcpu, msr_info); 3309 case APIC_BASE_MSR ... 
APIC_BASE_MSR + 0xff: 3310 return kvm_x2apic_msr_write(vcpu, msr, data); 3311 case MSR_IA32_TSC_DEADLINE: 3312 kvm_set_lapic_tscdeadline_msr(vcpu, data); 3313 break; 3314 case MSR_IA32_TSC_ADJUST: 3315 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) { 3316 if (!msr_info->host_initiated) { 3317 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; 3318 adjust_tsc_offset_guest(vcpu, adj); 3319 } 3320 vcpu->arch.ia32_tsc_adjust_msr = data; 3321 } 3322 break; 3323 case MSR_IA32_MISC_ENABLE: 3324 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && 3325 ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { 3326 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3)) 3327 return 1; 3328 vcpu->arch.ia32_misc_enable_msr = data; 3329 kvm_update_cpuid_runtime(vcpu); 3330 } else { 3331 vcpu->arch.ia32_misc_enable_msr = data; 3332 } 3333 break; 3334 case MSR_IA32_SMBASE: 3335 if (!msr_info->host_initiated) 3336 return 1; 3337 vcpu->arch.smbase = data; 3338 break; 3339 case MSR_IA32_POWER_CTL: 3340 vcpu->arch.msr_ia32_power_ctl = data; 3341 break; 3342 case MSR_IA32_TSC: 3343 if (msr_info->host_initiated) { 3344 kvm_synchronize_tsc(vcpu, data); 3345 } else { 3346 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; 3347 adjust_tsc_offset_guest(vcpu, adj); 3348 vcpu->arch.ia32_tsc_adjust_msr += adj; 3349 } 3350 break; 3351 case MSR_IA32_XSS: 3352 if (!msr_info->host_initiated && 3353 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 3354 return 1; 3355 /* 3356 * KVM supports exposing PT to the guest, but does not support 3357 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than 3358 * XSAVES/XRSTORS to save/restore PT MSRs. 3359 */ 3360 if (data & ~supported_xss) 3361 return 1; 3362 vcpu->arch.ia32_xss = data; 3363 break; 3364 case MSR_SMI_COUNT: 3365 if (!msr_info->host_initiated) 3366 return 1; 3367 vcpu->arch.smi_count = data; 3368 break; 3369 case MSR_KVM_WALL_CLOCK_NEW: 3370 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3371 return 1; 3372 3373 vcpu->kvm->arch.wall_clock = data; 3374 kvm_write_wall_clock(vcpu->kvm, data, 0); 3375 break; 3376 case MSR_KVM_WALL_CLOCK: 3377 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3378 return 1; 3379 3380 vcpu->kvm->arch.wall_clock = data; 3381 kvm_write_wall_clock(vcpu->kvm, data, 0); 3382 break; 3383 case MSR_KVM_SYSTEM_TIME_NEW: 3384 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3385 return 1; 3386 3387 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); 3388 break; 3389 case MSR_KVM_SYSTEM_TIME: 3390 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3391 return 1; 3392 3393 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); 3394 break; 3395 case MSR_KVM_ASYNC_PF_EN: 3396 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 3397 return 1; 3398 3399 if (kvm_pv_enable_async_pf(vcpu, data)) 3400 return 1; 3401 break; 3402 case MSR_KVM_ASYNC_PF_INT: 3403 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3404 return 1; 3405 3406 if (kvm_pv_enable_async_pf_int(vcpu, data)) 3407 return 1; 3408 break; 3409 case MSR_KVM_ASYNC_PF_ACK: 3410 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3411 return 1; 3412 if (data & 0x1) { 3413 vcpu->arch.apf.pageready_pending = false; 3414 kvm_check_async_pf_completion(vcpu); 3415 } 3416 break; 3417 case MSR_KVM_STEAL_TIME: 3418 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 3419 return 1; 3420 3421 if (unlikely(!sched_info_on())) 3422 return 1; 3423 3424 if (data & KVM_STEAL_RESERVED_MASK) 3425 return 1; 3426 3427 
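		/*
		 * Accepted layout (per the kvm_para ABI): bit 0 is
		 * KVM_MSR_ENABLED, bits 1-5 are reserved (rejected just
		 * above), and bits 63:6 are the GPA of a 64-byte aligned
		 * struct kvm_steal_time.  A guest typically programs it as
		 * (illustrative sketch of the guest side)
		 *
		 *	wrmsrl(MSR_KVM_STEAL_TIME,
		 *	       slow_virt_to_phys(st) | KVM_MSR_ENABLED);
		 *
		 * record_steal_time() recovers the GPA from msr_val when the
		 * steal time area is updated.
		 */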
vcpu->arch.st.msr_val = data; 3428 3429 if (!(data & KVM_MSR_ENABLED)) 3430 break; 3431 3432 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 3433 3434 break; 3435 case MSR_KVM_PV_EOI_EN: 3436 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 3437 return 1; 3438 3439 if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8))) 3440 return 1; 3441 break; 3442 3443 case MSR_KVM_POLL_CONTROL: 3444 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 3445 return 1; 3446 3447 /* only enable bit supported */ 3448 if (data & (-1ULL << 1)) 3449 return 1; 3450 3451 vcpu->arch.msr_kvm_poll_control = data; 3452 break; 3453 3454 case MSR_IA32_MCG_CTL: 3455 case MSR_IA32_MCG_STATUS: 3456 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3457 return set_msr_mce(vcpu, msr_info); 3458 3459 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3460 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3461 pr = true; 3462 fallthrough; 3463 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3464 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3465 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3466 return kvm_pmu_set_msr(vcpu, msr_info); 3467 3468 if (pr || data != 0) 3469 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " 3470 "0x%x data 0x%llx\n", msr, data); 3471 break; 3472 case MSR_K7_CLK_CTL: 3473 /* 3474 * Ignore all writes to this no longer documented MSR. 3475 * Writes are only relevant for old K7 processors, 3476 * all pre-dating SVM, but a recommended workaround from 3477 * AMD for these chips. It is possible to specify the 3478 * affected processor models on the command line, hence 3479 * the need to ignore the workaround. 3480 */ 3481 break; 3482 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 3483 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 3484 case HV_X64_MSR_SYNDBG_OPTIONS: 3485 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 3486 case HV_X64_MSR_CRASH_CTL: 3487 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 3488 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 3489 case HV_X64_MSR_TSC_EMULATION_CONTROL: 3490 case HV_X64_MSR_TSC_EMULATION_STATUS: 3491 return kvm_hv_set_msr_common(vcpu, msr, data, 3492 msr_info->host_initiated); 3493 case MSR_IA32_BBL_CR_CTL3: 3494 /* Drop writes to this legacy MSR -- see rdmsr 3495 * counterpart for further detail. 
3496 */ 3497 if (report_ignored_msrs) 3498 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", 3499 msr, data); 3500 break; 3501 case MSR_AMD64_OSVW_ID_LENGTH: 3502 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3503 return 1; 3504 vcpu->arch.osvw.length = data; 3505 break; 3506 case MSR_AMD64_OSVW_STATUS: 3507 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3508 return 1; 3509 vcpu->arch.osvw.status = data; 3510 break; 3511 case MSR_PLATFORM_INFO: 3512 if (!msr_info->host_initiated || 3513 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) && 3514 cpuid_fault_enabled(vcpu))) 3515 return 1; 3516 vcpu->arch.msr_platform_info = data; 3517 break; 3518 case MSR_MISC_FEATURES_ENABLES: 3519 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT || 3520 (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT && 3521 !supports_cpuid_fault(vcpu))) 3522 return 1; 3523 vcpu->arch.msr_misc_features_enables = data; 3524 break; 3525 default: 3526 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3527 return kvm_pmu_set_msr(vcpu, msr_info); 3528 return KVM_MSR_RET_INVALID; 3529 } 3530 return 0; 3531 } 3532 EXPORT_SYMBOL_GPL(kvm_set_msr_common); 3533 3534 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) 3535 { 3536 u64 data; 3537 u64 mcg_cap = vcpu->arch.mcg_cap; 3538 unsigned bank_num = mcg_cap & 0xff; 3539 3540 switch (msr) { 3541 case MSR_IA32_P5_MC_ADDR: 3542 case MSR_IA32_P5_MC_TYPE: 3543 data = 0; 3544 break; 3545 case MSR_IA32_MCG_CAP: 3546 data = vcpu->arch.mcg_cap; 3547 break; 3548 case MSR_IA32_MCG_CTL: 3549 if (!(mcg_cap & MCG_CTL_P) && !host) 3550 return 1; 3551 data = vcpu->arch.mcg_ctl; 3552 break; 3553 case MSR_IA32_MCG_STATUS: 3554 data = vcpu->arch.mcg_status; 3555 break; 3556 default: 3557 if (msr >= MSR_IA32_MC0_CTL && 3558 msr < MSR_IA32_MCx_CTL(bank_num)) { 3559 u32 offset = array_index_nospec( 3560 msr - MSR_IA32_MC0_CTL, 3561 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); 3562 3563 data = vcpu->arch.mce_banks[offset]; 3564 break; 3565 } 3566 return 1; 3567 } 3568 *pdata = data; 3569 return 0; 3570 } 3571 3572 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3573 { 3574 switch (msr_info->index) { 3575 case MSR_IA32_PLATFORM_ID: 3576 case MSR_IA32_EBL_CR_POWERON: 3577 case MSR_IA32_LASTBRANCHFROMIP: 3578 case MSR_IA32_LASTBRANCHTOIP: 3579 case MSR_IA32_LASTINTFROMIP: 3580 case MSR_IA32_LASTINTTOIP: 3581 case MSR_AMD64_SYSCFG: 3582 case MSR_K8_TSEG_ADDR: 3583 case MSR_K8_TSEG_MASK: 3584 case MSR_VM_HSAVE_PA: 3585 case MSR_K8_INT_PENDING_MSG: 3586 case MSR_AMD64_NB_CFG: 3587 case MSR_FAM10H_MMIO_CONF_BASE: 3588 case MSR_AMD64_BU_CFG2: 3589 case MSR_IA32_PERF_CTL: 3590 case MSR_AMD64_DC_CFG: 3591 case MSR_F15H_EX_CFG: 3592 /* 3593 * Intel Sandy Bridge CPUs must support the RAPL (running average power 3594 * limit) MSRs. Just return 0, as we do not want to expose the host 3595 * data here. Do not conditionalize this on CPUID, as KVM does not do 3596 * so for existing CPU-specific MSRs. 3597 */ 3598 case MSR_RAPL_POWER_UNIT: 3599 case MSR_PP0_ENERGY_STATUS: /* Power plane 0 (core) */ 3600 case MSR_PP1_ENERGY_STATUS: /* Power plane 1 (graphics uncore) */ 3601 case MSR_PKG_ENERGY_STATUS: /* Total package */ 3602 case MSR_DRAM_ENERGY_STATUS: /* DRAM controller */ 3603 msr_info->data = 0; 3604 break; 3605 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5: 3606 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3607 return kvm_pmu_get_msr(vcpu, msr_info); 3608 if (!msr_info->host_initiated) 3609 return 1; 3610 msr_info->data = 0; 3611 break; 3612 case MSR_K7_EVNTSEL0 ... 
MSR_K7_EVNTSEL3: 3613 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3614 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3615 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3616 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3617 return kvm_pmu_get_msr(vcpu, msr_info); 3618 msr_info->data = 0; 3619 break; 3620 case MSR_IA32_UCODE_REV: 3621 msr_info->data = vcpu->arch.microcode_version; 3622 break; 3623 case MSR_IA32_ARCH_CAPABILITIES: 3624 if (!msr_info->host_initiated && 3625 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) 3626 return 1; 3627 msr_info->data = vcpu->arch.arch_capabilities; 3628 break; 3629 case MSR_IA32_PERF_CAPABILITIES: 3630 if (!msr_info->host_initiated && 3631 !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) 3632 return 1; 3633 msr_info->data = vcpu->arch.perf_capabilities; 3634 break; 3635 case MSR_IA32_POWER_CTL: 3636 msr_info->data = vcpu->arch.msr_ia32_power_ctl; 3637 break; 3638 case MSR_IA32_TSC: { 3639 /* 3640 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset 3641 * even when not intercepted. AMD manual doesn't explicitly 3642 * state this but appears to behave the same. 3643 * 3644 * On userspace reads and writes, however, we unconditionally 3645 * return L1's TSC value to ensure backwards-compatible 3646 * behavior for migration. 3647 */ 3648 u64 offset, ratio; 3649 3650 if (msr_info->host_initiated) { 3651 offset = vcpu->arch.l1_tsc_offset; 3652 ratio = vcpu->arch.l1_tsc_scaling_ratio; 3653 } else { 3654 offset = vcpu->arch.tsc_offset; 3655 ratio = vcpu->arch.tsc_scaling_ratio; 3656 } 3657 3658 msr_info->data = kvm_scale_tsc(vcpu, rdtsc(), ratio) + offset; 3659 break; 3660 } 3661 case MSR_MTRRcap: 3662 case 0x200 ... 0x2ff: 3663 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); 3664 case 0xcd: /* fsb frequency */ 3665 msr_info->data = 3; 3666 break; 3667 /* 3668 * MSR_EBC_FREQUENCY_ID 3669 * Conservative value valid for even the basic CPU models. 3670 * Models 0,1: 000 in bits 23:21 indicating a bus speed of 3671 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, 3672 * and 266MHz for model 3, or 4. Set Core Clock 3673 * Frequency to System Bus Frequency Ratio to 1 (bits 3674 * 31:24) even though these are only valid for CPU 3675 * models > 2, however guests may end up dividing or 3676 * multiplying by zero otherwise. 3677 */ 3678 case MSR_EBC_FREQUENCY_ID: 3679 msr_info->data = 1 << 24; 3680 break; 3681 case MSR_IA32_APICBASE: 3682 msr_info->data = kvm_get_apic_base(vcpu); 3683 break; 3684 case APIC_BASE_MSR ... 
APIC_BASE_MSR + 0xff: 3685 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); 3686 case MSR_IA32_TSC_DEADLINE: 3687 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); 3688 break; 3689 case MSR_IA32_TSC_ADJUST: 3690 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; 3691 break; 3692 case MSR_IA32_MISC_ENABLE: 3693 msr_info->data = vcpu->arch.ia32_misc_enable_msr; 3694 break; 3695 case MSR_IA32_SMBASE: 3696 if (!msr_info->host_initiated) 3697 return 1; 3698 msr_info->data = vcpu->arch.smbase; 3699 break; 3700 case MSR_SMI_COUNT: 3701 msr_info->data = vcpu->arch.smi_count; 3702 break; 3703 case MSR_IA32_PERF_STATUS: 3704 /* TSC increment by tick */ 3705 msr_info->data = 1000ULL; 3706 /* CPU multiplier */ 3707 msr_info->data |= (((uint64_t)4ULL) << 40); 3708 break; 3709 case MSR_EFER: 3710 msr_info->data = vcpu->arch.efer; 3711 break; 3712 case MSR_KVM_WALL_CLOCK: 3713 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3714 return 1; 3715 3716 msr_info->data = vcpu->kvm->arch.wall_clock; 3717 break; 3718 case MSR_KVM_WALL_CLOCK_NEW: 3719 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3720 return 1; 3721 3722 msr_info->data = vcpu->kvm->arch.wall_clock; 3723 break; 3724 case MSR_KVM_SYSTEM_TIME: 3725 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3726 return 1; 3727 3728 msr_info->data = vcpu->arch.time; 3729 break; 3730 case MSR_KVM_SYSTEM_TIME_NEW: 3731 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3732 return 1; 3733 3734 msr_info->data = vcpu->arch.time; 3735 break; 3736 case MSR_KVM_ASYNC_PF_EN: 3737 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 3738 return 1; 3739 3740 msr_info->data = vcpu->arch.apf.msr_en_val; 3741 break; 3742 case MSR_KVM_ASYNC_PF_INT: 3743 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3744 return 1; 3745 3746 msr_info->data = vcpu->arch.apf.msr_int_val; 3747 break; 3748 case MSR_KVM_ASYNC_PF_ACK: 3749 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3750 return 1; 3751 3752 msr_info->data = 0; 3753 break; 3754 case MSR_KVM_STEAL_TIME: 3755 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 3756 return 1; 3757 3758 msr_info->data = vcpu->arch.st.msr_val; 3759 break; 3760 case MSR_KVM_PV_EOI_EN: 3761 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 3762 return 1; 3763 3764 msr_info->data = vcpu->arch.pv_eoi.msr_val; 3765 break; 3766 case MSR_KVM_POLL_CONTROL: 3767 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 3768 return 1; 3769 3770 msr_info->data = vcpu->arch.msr_kvm_poll_control; 3771 break; 3772 case MSR_IA32_P5_MC_ADDR: 3773 case MSR_IA32_P5_MC_TYPE: 3774 case MSR_IA32_MCG_CAP: 3775 case MSR_IA32_MCG_CTL: 3776 case MSR_IA32_MCG_STATUS: 3777 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3778 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, 3779 msr_info->host_initiated); 3780 case MSR_IA32_XSS: 3781 if (!msr_info->host_initiated && 3782 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 3783 return 1; 3784 msr_info->data = vcpu->arch.ia32_xss; 3785 break; 3786 case MSR_K7_CLK_CTL: 3787 /* 3788 * Provide expected ramp-up count for K7. All other 3789 * are set to zero, indicating minimum divisors for 3790 * every field. 3791 * 3792 * This prevents guest kernels on AMD host with CPU 3793 * type 6, model 8 and higher from exploding due to 3794 * the rdmsr failing. 3795 */ 3796 msr_info->data = 0x20000000; 3797 break; 3798 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 3799 case HV_X64_MSR_SYNDBG_CONTROL ... 
HV_X64_MSR_SYNDBG_PENDING_BUFFER: 3800 case HV_X64_MSR_SYNDBG_OPTIONS: 3801 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 3802 case HV_X64_MSR_CRASH_CTL: 3803 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 3804 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 3805 case HV_X64_MSR_TSC_EMULATION_CONTROL: 3806 case HV_X64_MSR_TSC_EMULATION_STATUS: 3807 return kvm_hv_get_msr_common(vcpu, 3808 msr_info->index, &msr_info->data, 3809 msr_info->host_initiated); 3810 case MSR_IA32_BBL_CR_CTL3: 3811 /* This legacy MSR exists but isn't fully documented in current 3812 * silicon. It is however accessed by winxp in very narrow 3813 * scenarios where it sets bit #19, itself documented as 3814 * a "reserved" bit. Best effort attempt to source coherent 3815 * read data here should the balance of the register be 3816 * interpreted by the guest: 3817 * 3818 * L2 cache control register 3: 64GB range, 256KB size, 3819 * enabled, latency 0x1, configured 3820 */ 3821 msr_info->data = 0xbe702111; 3822 break; 3823 case MSR_AMD64_OSVW_ID_LENGTH: 3824 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3825 return 1; 3826 msr_info->data = vcpu->arch.osvw.length; 3827 break; 3828 case MSR_AMD64_OSVW_STATUS: 3829 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3830 return 1; 3831 msr_info->data = vcpu->arch.osvw.status; 3832 break; 3833 case MSR_PLATFORM_INFO: 3834 if (!msr_info->host_initiated && 3835 !vcpu->kvm->arch.guest_can_read_msr_platform_info) 3836 return 1; 3837 msr_info->data = vcpu->arch.msr_platform_info; 3838 break; 3839 case MSR_MISC_FEATURES_ENABLES: 3840 msr_info->data = vcpu->arch.msr_misc_features_enables; 3841 break; 3842 case MSR_K7_HWCR: 3843 msr_info->data = vcpu->arch.msr_hwcr; 3844 break; 3845 default: 3846 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 3847 return kvm_pmu_get_msr(vcpu, msr_info); 3848 return KVM_MSR_RET_INVALID; 3849 } 3850 return 0; 3851 } 3852 EXPORT_SYMBOL_GPL(kvm_get_msr_common); 3853 3854 /* 3855 * Read or write a bunch of msrs. All parameters are kernel addresses. 3856 * 3857 * @return number of msrs set successfully. 3858 */ 3859 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, 3860 struct kvm_msr_entry *entries, 3861 int (*do_msr)(struct kvm_vcpu *vcpu, 3862 unsigned index, u64 *data)) 3863 { 3864 int i; 3865 3866 for (i = 0; i < msrs->nmsrs; ++i) 3867 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 3868 break; 3869 3870 return i; 3871 } 3872 3873 /* 3874 * Read or write a bunch of msrs. Parameters are user addresses. 3875 * 3876 * @return number of msrs set successfully. 
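 *
 * On partial failure the loop in __msr_io() stops at the first entry the
 * do_msr callback rejects, so the positive return value is the number of
 * entries processed before that one; comparing it against ->nmsrs tells
 * userspace exactly which MSR was refused.  A minimal illustrative sketch
 * of the userspace side (not part of this file; "vcpu_fd" stands for an
 * open vCPU file descriptor, and the snippet assumes <linux/kvm.h> and
 * <sys/ioctl.h>):
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} buf = { .hdr.nmsrs = 1, .entries[0].index = MSR_IA32_TSC };
 *
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &buf);
 *	if (n < 0)
 *		;	/* ioctl failed, errno holds the reason */
 *	else if (n < 1)
 *		;	/* entries[n] was rejected by the MSR code */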
3877 */ 3878 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, 3879 int (*do_msr)(struct kvm_vcpu *vcpu, 3880 unsigned index, u64 *data), 3881 int writeback) 3882 { 3883 struct kvm_msrs msrs; 3884 struct kvm_msr_entry *entries; 3885 int r, n; 3886 unsigned size; 3887 3888 r = -EFAULT; 3889 if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) 3890 goto out; 3891 3892 r = -E2BIG; 3893 if (msrs.nmsrs >= MAX_IO_MSRS) 3894 goto out; 3895 3896 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; 3897 entries = memdup_user(user_msrs->entries, size); 3898 if (IS_ERR(entries)) { 3899 r = PTR_ERR(entries); 3900 goto out; 3901 } 3902 3903 r = n = __msr_io(vcpu, &msrs, entries, do_msr); 3904 if (r < 0) 3905 goto out_free; 3906 3907 r = -EFAULT; 3908 if (writeback && copy_to_user(user_msrs->entries, entries, size)) 3909 goto out_free; 3910 3911 r = n; 3912 3913 out_free: 3914 kfree(entries); 3915 out: 3916 return r; 3917 } 3918 3919 static inline bool kvm_can_mwait_in_guest(void) 3920 { 3921 return boot_cpu_has(X86_FEATURE_MWAIT) && 3922 !boot_cpu_has_bug(X86_BUG_MONITOR) && 3923 boot_cpu_has(X86_FEATURE_ARAT); 3924 } 3925 3926 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, 3927 struct kvm_cpuid2 __user *cpuid_arg) 3928 { 3929 struct kvm_cpuid2 cpuid; 3930 int r; 3931 3932 r = -EFAULT; 3933 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 3934 return r; 3935 3936 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); 3937 if (r) 3938 return r; 3939 3940 r = -EFAULT; 3941 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 3942 return r; 3943 3944 return 0; 3945 } 3946 3947 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 3948 { 3949 int r = 0; 3950 3951 switch (ext) { 3952 case KVM_CAP_IRQCHIP: 3953 case KVM_CAP_HLT: 3954 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 3955 case KVM_CAP_SET_TSS_ADDR: 3956 case KVM_CAP_EXT_CPUID: 3957 case KVM_CAP_EXT_EMUL_CPUID: 3958 case KVM_CAP_CLOCKSOURCE: 3959 case KVM_CAP_PIT: 3960 case KVM_CAP_NOP_IO_DELAY: 3961 case KVM_CAP_MP_STATE: 3962 case KVM_CAP_SYNC_MMU: 3963 case KVM_CAP_USER_NMI: 3964 case KVM_CAP_REINJECT_CONTROL: 3965 case KVM_CAP_IRQ_INJECT_STATUS: 3966 case KVM_CAP_IOEVENTFD: 3967 case KVM_CAP_IOEVENTFD_NO_LENGTH: 3968 case KVM_CAP_PIT2: 3969 case KVM_CAP_PIT_STATE2: 3970 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 3971 case KVM_CAP_VCPU_EVENTS: 3972 case KVM_CAP_HYPERV: 3973 case KVM_CAP_HYPERV_VAPIC: 3974 case KVM_CAP_HYPERV_SPIN: 3975 case KVM_CAP_HYPERV_SYNIC: 3976 case KVM_CAP_HYPERV_SYNIC2: 3977 case KVM_CAP_HYPERV_VP_INDEX: 3978 case KVM_CAP_HYPERV_EVENTFD: 3979 case KVM_CAP_HYPERV_TLBFLUSH: 3980 case KVM_CAP_HYPERV_SEND_IPI: 3981 case KVM_CAP_HYPERV_CPUID: 3982 case KVM_CAP_HYPERV_ENFORCE_CPUID: 3983 case KVM_CAP_SYS_HYPERV_CPUID: 3984 case KVM_CAP_PCI_SEGMENT: 3985 case KVM_CAP_DEBUGREGS: 3986 case KVM_CAP_X86_ROBUST_SINGLESTEP: 3987 case KVM_CAP_XSAVE: 3988 case KVM_CAP_ASYNC_PF: 3989 case KVM_CAP_ASYNC_PF_INT: 3990 case KVM_CAP_GET_TSC_KHZ: 3991 case KVM_CAP_KVMCLOCK_CTRL: 3992 case KVM_CAP_READONLY_MEM: 3993 case KVM_CAP_HYPERV_TIME: 3994 case KVM_CAP_IOAPIC_POLARITY_IGNORED: 3995 case KVM_CAP_TSC_DEADLINE_TIMER: 3996 case KVM_CAP_DISABLE_QUIRKS: 3997 case KVM_CAP_SET_BOOT_CPU_ID: 3998 case KVM_CAP_SPLIT_IRQCHIP: 3999 case KVM_CAP_IMMEDIATE_EXIT: 4000 case KVM_CAP_PMU_EVENT_FILTER: 4001 case KVM_CAP_GET_MSR_FEATURES: 4002 case KVM_CAP_MSR_PLATFORM_INFO: 4003 case KVM_CAP_EXCEPTION_PAYLOAD: 4004 case KVM_CAP_SET_GUEST_DEBUG: 4005 case KVM_CAP_LAST_CPU: 4006 case KVM_CAP_X86_USER_SPACE_MSR: 4007 
case KVM_CAP_X86_MSR_FILTER: 4008 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 4009 #ifdef CONFIG_X86_SGX_KVM 4010 case KVM_CAP_SGX_ATTRIBUTE: 4011 #endif 4012 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 4013 case KVM_CAP_SREGS2: 4014 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 4015 r = 1; 4016 break; 4017 case KVM_CAP_EXIT_HYPERCALL: 4018 r = KVM_EXIT_HYPERCALL_VALID_MASK; 4019 break; 4020 case KVM_CAP_SET_GUEST_DEBUG2: 4021 return KVM_GUESTDBG_VALID_MASK; 4022 #ifdef CONFIG_KVM_XEN 4023 case KVM_CAP_XEN_HVM: 4024 r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | 4025 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | 4026 KVM_XEN_HVM_CONFIG_SHARED_INFO; 4027 if (sched_info_on()) 4028 r |= KVM_XEN_HVM_CONFIG_RUNSTATE; 4029 break; 4030 #endif 4031 case KVM_CAP_SYNC_REGS: 4032 r = KVM_SYNC_X86_VALID_FIELDS; 4033 break; 4034 case KVM_CAP_ADJUST_CLOCK: 4035 r = KVM_CLOCK_TSC_STABLE; 4036 break; 4037 case KVM_CAP_X86_DISABLE_EXITS: 4038 r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE | 4039 KVM_X86_DISABLE_EXITS_CSTATE; 4040 if(kvm_can_mwait_in_guest()) 4041 r |= KVM_X86_DISABLE_EXITS_MWAIT; 4042 break; 4043 case KVM_CAP_X86_SMM: 4044 /* SMBASE is usually relocated above 1M on modern chipsets, 4045 * and SMM handlers might indeed rely on 4G segment limits, 4046 * so do not report SMM to be available if real mode is 4047 * emulated via vm86 mode. Still, do not go to great lengths 4048 * to avoid userspace's usage of the feature, because it is a 4049 * fringe case that is not enabled except via specific settings 4050 * of the module parameters. 4051 */ 4052 r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); 4053 break; 4054 case KVM_CAP_VAPIC: 4055 r = !static_call(kvm_x86_cpu_has_accelerated_tpr)(); 4056 break; 4057 case KVM_CAP_NR_VCPUS: 4058 r = KVM_SOFT_MAX_VCPUS; 4059 break; 4060 case KVM_CAP_MAX_VCPUS: 4061 r = KVM_MAX_VCPUS; 4062 break; 4063 case KVM_CAP_MAX_VCPU_ID: 4064 r = KVM_MAX_VCPU_ID; 4065 break; 4066 case KVM_CAP_PV_MMU: /* obsolete */ 4067 r = 0; 4068 break; 4069 case KVM_CAP_MCE: 4070 r = KVM_MAX_MCE_BANKS; 4071 break; 4072 case KVM_CAP_XCRS: 4073 r = boot_cpu_has(X86_FEATURE_XSAVE); 4074 break; 4075 case KVM_CAP_TSC_CONTROL: 4076 r = kvm_has_tsc_control; 4077 break; 4078 case KVM_CAP_X2APIC_API: 4079 r = KVM_X2APIC_API_VALID_FLAGS; 4080 break; 4081 case KVM_CAP_NESTED_STATE: 4082 r = kvm_x86_ops.nested_ops->get_state ? 
4083 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; 4084 break; 4085 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 4086 r = kvm_x86_ops.enable_direct_tlbflush != NULL; 4087 break; 4088 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 4089 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; 4090 break; 4091 case KVM_CAP_SMALLER_MAXPHYADDR: 4092 r = (int) allow_smaller_maxphyaddr; 4093 break; 4094 case KVM_CAP_STEAL_TIME: 4095 r = sched_info_on(); 4096 break; 4097 case KVM_CAP_X86_BUS_LOCK_EXIT: 4098 if (kvm_has_bus_lock_exit) 4099 r = KVM_BUS_LOCK_DETECTION_OFF | 4100 KVM_BUS_LOCK_DETECTION_EXIT; 4101 else 4102 r = 0; 4103 break; 4104 default: 4105 break; 4106 } 4107 return r; 4108 4109 } 4110 4111 long kvm_arch_dev_ioctl(struct file *filp, 4112 unsigned int ioctl, unsigned long arg) 4113 { 4114 void __user *argp = (void __user *)arg; 4115 long r; 4116 4117 switch (ioctl) { 4118 case KVM_GET_MSR_INDEX_LIST: { 4119 struct kvm_msr_list __user *user_msr_list = argp; 4120 struct kvm_msr_list msr_list; 4121 unsigned n; 4122 4123 r = -EFAULT; 4124 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4125 goto out; 4126 n = msr_list.nmsrs; 4127 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; 4128 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4129 goto out; 4130 r = -E2BIG; 4131 if (n < msr_list.nmsrs) 4132 goto out; 4133 r = -EFAULT; 4134 if (copy_to_user(user_msr_list->indices, &msrs_to_save, 4135 num_msrs_to_save * sizeof(u32))) 4136 goto out; 4137 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, 4138 &emulated_msrs, 4139 num_emulated_msrs * sizeof(u32))) 4140 goto out; 4141 r = 0; 4142 break; 4143 } 4144 case KVM_GET_SUPPORTED_CPUID: 4145 case KVM_GET_EMULATED_CPUID: { 4146 struct kvm_cpuid2 __user *cpuid_arg = argp; 4147 struct kvm_cpuid2 cpuid; 4148 4149 r = -EFAULT; 4150 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4151 goto out; 4152 4153 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, 4154 ioctl); 4155 if (r) 4156 goto out; 4157 4158 r = -EFAULT; 4159 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 4160 goto out; 4161 r = 0; 4162 break; 4163 } 4164 case KVM_X86_GET_MCE_CAP_SUPPORTED: 4165 r = -EFAULT; 4166 if (copy_to_user(argp, &kvm_mce_cap_supported, 4167 sizeof(kvm_mce_cap_supported))) 4168 goto out; 4169 r = 0; 4170 break; 4171 case KVM_GET_MSR_FEATURE_INDEX_LIST: { 4172 struct kvm_msr_list __user *user_msr_list = argp; 4173 struct kvm_msr_list msr_list; 4174 unsigned int n; 4175 4176 r = -EFAULT; 4177 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4178 goto out; 4179 n = msr_list.nmsrs; 4180 msr_list.nmsrs = num_msr_based_features; 4181 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4182 goto out; 4183 r = -E2BIG; 4184 if (n < msr_list.nmsrs) 4185 goto out; 4186 r = -EFAULT; 4187 if (copy_to_user(user_msr_list->indices, &msr_based_features, 4188 num_msr_based_features * sizeof(u32))) 4189 goto out; 4190 r = 0; 4191 break; 4192 } 4193 case KVM_GET_MSRS: 4194 r = msr_io(NULL, argp, do_get_msr_feature, 1); 4195 break; 4196 case KVM_GET_SUPPORTED_HV_CPUID: 4197 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp); 4198 break; 4199 default: 4200 r = -EINVAL; 4201 break; 4202 } 4203 out: 4204 return r; 4205 } 4206 4207 static void wbinvd_ipi(void *garbage) 4208 { 4209 wbinvd(); 4210 } 4211 4212 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) 4213 { 4214 return kvm_arch_has_noncoherent_dma(vcpu->kvm); 4215 } 4216 4217 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 4218 { 4219 /* Address WBINVD may 
be executed by guest */ 4220 if (need_emulate_wbinvd(vcpu)) { 4221 if (static_call(kvm_x86_has_wbinvd_exit)()) 4222 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 4223 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) 4224 smp_call_function_single(vcpu->cpu, 4225 wbinvd_ipi, NULL, 1); 4226 } 4227 4228 static_call(kvm_x86_vcpu_load)(vcpu, cpu); 4229 4230 /* Save host pkru register if supported */ 4231 vcpu->arch.host_pkru = read_pkru(); 4232 4233 /* Apply any externally detected TSC adjustments (due to suspend) */ 4234 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { 4235 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); 4236 vcpu->arch.tsc_offset_adjustment = 0; 4237 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 4238 } 4239 4240 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { 4241 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : 4242 rdtsc() - vcpu->arch.last_host_tsc; 4243 if (tsc_delta < 0) 4244 mark_tsc_unstable("KVM discovered backwards TSC"); 4245 4246 if (kvm_check_tsc_unstable()) { 4247 u64 offset = kvm_compute_l1_tsc_offset(vcpu, 4248 vcpu->arch.last_guest_tsc); 4249 kvm_vcpu_write_tsc_offset(vcpu, offset); 4250 vcpu->arch.tsc_catchup = 1; 4251 } 4252 4253 if (kvm_lapic_hv_timer_in_use(vcpu)) 4254 kvm_lapic_restart_hv_timer(vcpu); 4255 4256 /* 4257 * On a host with synchronized TSC, there is no need to update 4258 * kvmclock on vcpu->cpu migration 4259 */ 4260 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) 4261 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 4262 if (vcpu->cpu != cpu) 4263 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); 4264 vcpu->cpu = cpu; 4265 } 4266 4267 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 4268 } 4269 4270 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) 4271 { 4272 struct kvm_host_map map; 4273 struct kvm_steal_time *st; 4274 4275 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 4276 return; 4277 4278 if (vcpu->arch.st.preempted) 4279 return; 4280 4281 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, 4282 &vcpu->arch.st.cache, true)) 4283 return; 4284 4285 st = map.hva + 4286 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); 4287 4288 st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; 4289 4290 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); 4291 } 4292 4293 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 4294 { 4295 int idx; 4296 4297 if (vcpu->preempted && !vcpu->arch.guest_state_protected) 4298 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); 4299 4300 /* 4301 * Take the srcu lock as memslots will be accessed to check the gfn 4302 * cache generation against the memslots generation. 4303 */ 4304 idx = srcu_read_lock(&vcpu->kvm->srcu); 4305 if (kvm_xen_msr_enabled(vcpu->kvm)) 4306 kvm_xen_runstate_set_preempted(vcpu); 4307 else 4308 kvm_steal_time_set_preempted(vcpu); 4309 srcu_read_unlock(&vcpu->kvm->srcu, idx); 4310 4311 static_call(kvm_x86_vcpu_put)(vcpu); 4312 vcpu->arch.last_host_tsc = rdtsc(); 4313 /* 4314 * If userspace has set any breakpoints or watchpoints, dr6 is restored 4315 * on every vmexit, but if not, we might have a stale dr6 from the 4316 * guest. do_debug expects dr6 to be cleared after it runs, do the same. 
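	 * (That is what the set_debugreg(0, 6) below does: it clears the
	 * hardware DR6 on the host, so a value left behind by the guest
	 * cannot be misinterpreted by the host's #DB handling.)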
4317 */ 4318 set_debugreg(0, 6); 4319 } 4320 4321 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 4322 struct kvm_lapic_state *s) 4323 { 4324 if (vcpu->arch.apicv_active) 4325 static_call(kvm_x86_sync_pir_to_irr)(vcpu); 4326 4327 return kvm_apic_get_state(vcpu, s); 4328 } 4329 4330 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, 4331 struct kvm_lapic_state *s) 4332 { 4333 int r; 4334 4335 r = kvm_apic_set_state(vcpu, s); 4336 if (r) 4337 return r; 4338 update_cr8_intercept(vcpu); 4339 4340 return 0; 4341 } 4342 4343 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) 4344 { 4345 /* 4346 * We can accept userspace's request for interrupt injection 4347 * as long as we have a place to store the interrupt number. 4348 * The actual injection will happen when the CPU is able to 4349 * deliver the interrupt. 4350 */ 4351 if (kvm_cpu_has_extint(vcpu)) 4352 return false; 4353 4354 /* Acknowledging ExtINT does not happen if LINT0 is masked. */ 4355 return (!lapic_in_kernel(vcpu) || 4356 kvm_apic_accept_pic_intr(vcpu)); 4357 } 4358 4359 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) 4360 { 4361 /* 4362 * Do not cause an interrupt window exit if an exception 4363 * is pending or an event needs reinjection; userspace 4364 * might want to inject the interrupt manually using KVM_SET_REGS 4365 * or KVM_SET_SREGS. For that to work, we must be at an 4366 * instruction boundary and with no events half-injected. 4367 */ 4368 return (kvm_arch_interrupt_allowed(vcpu) && 4369 kvm_cpu_accept_dm_intr(vcpu) && 4370 !kvm_event_needs_reinjection(vcpu) && 4371 !vcpu->arch.exception.pending); 4372 } 4373 4374 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 4375 struct kvm_interrupt *irq) 4376 { 4377 if (irq->irq >= KVM_NR_INTERRUPTS) 4378 return -EINVAL; 4379 4380 if (!irqchip_in_kernel(vcpu->kvm)) { 4381 kvm_queue_interrupt(vcpu, irq->irq, false); 4382 kvm_make_request(KVM_REQ_EVENT, vcpu); 4383 return 0; 4384 } 4385 4386 /* 4387 * With in-kernel LAPIC, we only use this to inject EXTINT, so 4388 * fail for in-kernel 8259. 
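	 *
	 * Concretely: with a split irqchip (LAPIC in kernel, PIC/IOAPIC in
	 * userspace) the vector is queued in pending_external_vector below,
	 * whereas with a fully in-kernel PIC userspace is expected to raise
	 * the line via KVM_IRQ_LINE instead, hence the -ENXIO.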
4389 */ 4390 if (pic_in_kernel(vcpu->kvm)) 4391 return -ENXIO; 4392 4393 if (vcpu->arch.pending_external_vector != -1) 4394 return -EEXIST; 4395 4396 vcpu->arch.pending_external_vector = irq->irq; 4397 kvm_make_request(KVM_REQ_EVENT, vcpu); 4398 return 0; 4399 } 4400 4401 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) 4402 { 4403 kvm_inject_nmi(vcpu); 4404 4405 return 0; 4406 } 4407 4408 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu) 4409 { 4410 kvm_make_request(KVM_REQ_SMI, vcpu); 4411 4412 return 0; 4413 } 4414 4415 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, 4416 struct kvm_tpr_access_ctl *tac) 4417 { 4418 if (tac->flags) 4419 return -EINVAL; 4420 vcpu->arch.tpr_access_reporting = !!tac->enabled; 4421 return 0; 4422 } 4423 4424 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, 4425 u64 mcg_cap) 4426 { 4427 int r; 4428 unsigned bank_num = mcg_cap & 0xff, bank; 4429 4430 r = -EINVAL; 4431 if (!bank_num || bank_num > KVM_MAX_MCE_BANKS) 4432 goto out; 4433 if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000)) 4434 goto out; 4435 r = 0; 4436 vcpu->arch.mcg_cap = mcg_cap; 4437 /* Init IA32_MCG_CTL to all 1s */ 4438 if (mcg_cap & MCG_CTL_P) 4439 vcpu->arch.mcg_ctl = ~(u64)0; 4440 /* Init IA32_MCi_CTL to all 1s */ 4441 for (bank = 0; bank < bank_num; bank++) 4442 vcpu->arch.mce_banks[bank*4] = ~(u64)0; 4443 4444 static_call(kvm_x86_setup_mce)(vcpu); 4445 out: 4446 return r; 4447 } 4448 4449 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, 4450 struct kvm_x86_mce *mce) 4451 { 4452 u64 mcg_cap = vcpu->arch.mcg_cap; 4453 unsigned bank_num = mcg_cap & 0xff; 4454 u64 *banks = vcpu->arch.mce_banks; 4455 4456 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) 4457 return -EINVAL; 4458 /* 4459 * if IA32_MCG_CTL is not all 1s, the uncorrected error 4460 * reporting is disabled 4461 */ 4462 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && 4463 vcpu->arch.mcg_ctl != ~(u64)0) 4464 return 0; 4465 banks += 4 * mce->bank; 4466 /* 4467 * if IA32_MCi_CTL is not all 1s, the uncorrected error 4468 * reporting is disabled for the bank 4469 */ 4470 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) 4471 return 0; 4472 if (mce->status & MCI_STATUS_UC) { 4473 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || 4474 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { 4475 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 4476 return 0; 4477 } 4478 if (banks[1] & MCI_STATUS_VAL) 4479 mce->status |= MCI_STATUS_OVER; 4480 banks[2] = mce->addr; 4481 banks[3] = mce->misc; 4482 vcpu->arch.mcg_status = mce->mcg_status; 4483 banks[1] = mce->status; 4484 kvm_queue_exception(vcpu, MC_VECTOR); 4485 } else if (!(banks[1] & MCI_STATUS_VAL) 4486 || !(banks[1] & MCI_STATUS_UC)) { 4487 if (banks[1] & MCI_STATUS_VAL) 4488 mce->status |= MCI_STATUS_OVER; 4489 banks[2] = mce->addr; 4490 banks[3] = mce->misc; 4491 banks[1] = mce->status; 4492 } else 4493 banks[1] |= MCI_STATUS_OVER; 4494 return 0; 4495 } 4496 4497 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, 4498 struct kvm_vcpu_events *events) 4499 { 4500 process_nmi(vcpu); 4501 4502 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 4503 process_smi(vcpu); 4504 4505 /* 4506 * In guest mode, payload delivery should be deferred, 4507 * so that the L1 hypervisor can intercept #PF before 4508 * CR2 is modified (or intercept #DB before DR6 is 4509 * modified under nVMX). 
Unless the per-VM capability, 4510 * KVM_CAP_EXCEPTION_PAYLOAD, is set, we may not defer the delivery of 4511 * an exception payload and handle after a KVM_GET_VCPU_EVENTS. Since we 4512 * opportunistically defer the exception payload, deliver it if the 4513 * capability hasn't been requested before processing a 4514 * KVM_GET_VCPU_EVENTS. 4515 */ 4516 if (!vcpu->kvm->arch.exception_payload_enabled && 4517 vcpu->arch.exception.pending && vcpu->arch.exception.has_payload) 4518 kvm_deliver_exception_payload(vcpu); 4519 4520 /* 4521 * The API doesn't provide the instruction length for software 4522 * exceptions, so don't report them. As long as the guest RIP 4523 * isn't advanced, we should expect to encounter the exception 4524 * again. 4525 */ 4526 if (kvm_exception_is_soft(vcpu->arch.exception.nr)) { 4527 events->exception.injected = 0; 4528 events->exception.pending = 0; 4529 } else { 4530 events->exception.injected = vcpu->arch.exception.injected; 4531 events->exception.pending = vcpu->arch.exception.pending; 4532 /* 4533 * For ABI compatibility, deliberately conflate 4534 * pending and injected exceptions when 4535 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled. 4536 */ 4537 if (!vcpu->kvm->arch.exception_payload_enabled) 4538 events->exception.injected |= 4539 vcpu->arch.exception.pending; 4540 } 4541 events->exception.nr = vcpu->arch.exception.nr; 4542 events->exception.has_error_code = vcpu->arch.exception.has_error_code; 4543 events->exception.error_code = vcpu->arch.exception.error_code; 4544 events->exception_has_payload = vcpu->arch.exception.has_payload; 4545 events->exception_payload = vcpu->arch.exception.payload; 4546 4547 events->interrupt.injected = 4548 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; 4549 events->interrupt.nr = vcpu->arch.interrupt.nr; 4550 events->interrupt.soft = 0; 4551 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 4552 4553 events->nmi.injected = vcpu->arch.nmi_injected; 4554 events->nmi.pending = vcpu->arch.nmi_pending != 0; 4555 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); 4556 events->nmi.pad = 0; 4557 4558 events->sipi_vector = 0; /* never valid when reporting to user space */ 4559 4560 events->smi.smm = is_smm(vcpu); 4561 events->smi.pending = vcpu->arch.smi_pending; 4562 events->smi.smm_inside_nmi = 4563 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); 4564 events->smi.latched_init = kvm_lapic_latched_init(vcpu); 4565 4566 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 4567 | KVM_VCPUEVENT_VALID_SHADOW 4568 | KVM_VCPUEVENT_VALID_SMM); 4569 if (vcpu->kvm->arch.exception_payload_enabled) 4570 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; 4571 4572 memset(&events->reserved, 0, sizeof(events->reserved)); 4573 } 4574 4575 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm); 4576 4577 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 4578 struct kvm_vcpu_events *events) 4579 { 4580 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING 4581 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 4582 | KVM_VCPUEVENT_VALID_SHADOW 4583 | KVM_VCPUEVENT_VALID_SMM 4584 | KVM_VCPUEVENT_VALID_PAYLOAD)) 4585 return -EINVAL; 4586 4587 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { 4588 if (!vcpu->kvm->arch.exception_payload_enabled) 4589 return -EINVAL; 4590 if (events->exception.pending) 4591 events->exception.injected = 0; 4592 else 4593 events->exception_has_payload = 0; 4594 } else { 4595 events->exception.pending = 0; 4596 events->exception_has_payload = 0; 4597 } 4598 4599 if 
((events->exception.injected || events->exception.pending) && 4600 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) 4601 return -EINVAL; 4602 4603 /* INITs are latched while in SMM */ 4604 if (events->flags & KVM_VCPUEVENT_VALID_SMM && 4605 (events->smi.smm || events->smi.pending) && 4606 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 4607 return -EINVAL; 4608 4609 process_nmi(vcpu); 4610 vcpu->arch.exception.injected = events->exception.injected; 4611 vcpu->arch.exception.pending = events->exception.pending; 4612 vcpu->arch.exception.nr = events->exception.nr; 4613 vcpu->arch.exception.has_error_code = events->exception.has_error_code; 4614 vcpu->arch.exception.error_code = events->exception.error_code; 4615 vcpu->arch.exception.has_payload = events->exception_has_payload; 4616 vcpu->arch.exception.payload = events->exception_payload; 4617 4618 vcpu->arch.interrupt.injected = events->interrupt.injected; 4619 vcpu->arch.interrupt.nr = events->interrupt.nr; 4620 vcpu->arch.interrupt.soft = events->interrupt.soft; 4621 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) 4622 static_call(kvm_x86_set_interrupt_shadow)(vcpu, 4623 events->interrupt.shadow); 4624 4625 vcpu->arch.nmi_injected = events->nmi.injected; 4626 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) 4627 vcpu->arch.nmi_pending = events->nmi.pending; 4628 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); 4629 4630 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && 4631 lapic_in_kernel(vcpu)) 4632 vcpu->arch.apic->sipi_vector = events->sipi_vector; 4633 4634 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { 4635 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) 4636 kvm_smm_changed(vcpu, events->smi.smm); 4637 4638 vcpu->arch.smi_pending = events->smi.pending; 4639 4640 if (events->smi.smm) { 4641 if (events->smi.smm_inside_nmi) 4642 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 4643 else 4644 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; 4645 } 4646 4647 if (lapic_in_kernel(vcpu)) { 4648 if (events->smi.latched_init) 4649 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 4650 else 4651 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 4652 } 4653 } 4654 4655 kvm_make_request(KVM_REQ_EVENT, vcpu); 4656 4657 return 0; 4658 } 4659 4660 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, 4661 struct kvm_debugregs *dbgregs) 4662 { 4663 unsigned long val; 4664 4665 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); 4666 kvm_get_dr(vcpu, 6, &val); 4667 dbgregs->dr6 = val; 4668 dbgregs->dr7 = vcpu->arch.dr7; 4669 dbgregs->flags = 0; 4670 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); 4671 } 4672 4673 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 4674 struct kvm_debugregs *dbgregs) 4675 { 4676 if (dbgregs->flags) 4677 return -EINVAL; 4678 4679 if (!kvm_dr6_valid(dbgregs->dr6)) 4680 return -EINVAL; 4681 if (!kvm_dr7_valid(dbgregs->dr7)) 4682 return -EINVAL; 4683 4684 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); 4685 kvm_update_dr0123(vcpu); 4686 vcpu->arch.dr6 = dbgregs->dr6; 4687 vcpu->arch.dr7 = dbgregs->dr7; 4688 kvm_update_dr7(vcpu); 4689 4690 return 0; 4691 } 4692 4693 #define XSTATE_COMPACTION_ENABLED (1ULL << 63) 4694 4695 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) 4696 { 4697 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; 4698 u64 xstate_bv = xsave->header.xfeatures; 4699 u64 valid; 4700 4701 /* 4702 * Copy legacy XSAVE area, to avoid complications with CPUID 4703 * leaves 
0 and 1 in the loop below. 4704 */ 4705 memcpy(dest, xsave, XSAVE_HDR_OFFSET); 4706 4707 /* Set XSTATE_BV */ 4708 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE; 4709 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv; 4710 4711 /* 4712 * Copy each region from the possibly compacted offset to the 4713 * non-compacted offset. 4714 */ 4715 valid = xstate_bv & ~XFEATURE_MASK_FPSSE; 4716 while (valid) { 4717 u32 size, offset, ecx, edx; 4718 u64 xfeature_mask = valid & -valid; 4719 int xfeature_nr = fls64(xfeature_mask) - 1; 4720 void *src; 4721 4722 cpuid_count(XSTATE_CPUID, xfeature_nr, 4723 &size, &offset, &ecx, &edx); 4724 4725 if (xfeature_nr == XFEATURE_PKRU) { 4726 memcpy(dest + offset, &vcpu->arch.pkru, 4727 sizeof(vcpu->arch.pkru)); 4728 } else { 4729 src = get_xsave_addr(xsave, xfeature_nr); 4730 if (src) 4731 memcpy(dest + offset, src, size); 4732 } 4733 4734 valid -= xfeature_mask; 4735 } 4736 } 4737 4738 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) 4739 { 4740 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; 4741 u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET); 4742 u64 valid; 4743 4744 /* 4745 * Copy legacy XSAVE area, to avoid complications with CPUID 4746 * leaves 0 and 1 in the loop below. 4747 */ 4748 memcpy(xsave, src, XSAVE_HDR_OFFSET); 4749 4750 /* Set XSTATE_BV and possibly XCOMP_BV. */ 4751 xsave->header.xfeatures = xstate_bv; 4752 if (boot_cpu_has(X86_FEATURE_XSAVES)) 4753 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; 4754 4755 /* 4756 * Copy each region from the non-compacted offset to the 4757 * possibly compacted offset. 4758 */ 4759 valid = xstate_bv & ~XFEATURE_MASK_FPSSE; 4760 while (valid) { 4761 u32 size, offset, ecx, edx; 4762 u64 xfeature_mask = valid & -valid; 4763 int xfeature_nr = fls64(xfeature_mask) - 1; 4764 4765 cpuid_count(XSTATE_CPUID, xfeature_nr, 4766 &size, &offset, &ecx, &edx); 4767 4768 if (xfeature_nr == XFEATURE_PKRU) { 4769 memcpy(&vcpu->arch.pkru, src + offset, 4770 sizeof(vcpu->arch.pkru)); 4771 } else { 4772 void *dest = get_xsave_addr(xsave, xfeature_nr); 4773 4774 if (dest) 4775 memcpy(dest, src + offset, size); 4776 } 4777 4778 valid -= xfeature_mask; 4779 } 4780 } 4781 4782 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, 4783 struct kvm_xsave *guest_xsave) 4784 { 4785 if (!vcpu->arch.guest_fpu) 4786 return; 4787 4788 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 4789 memset(guest_xsave, 0, sizeof(struct kvm_xsave)); 4790 fill_xsave((u8 *) guest_xsave->region, vcpu); 4791 } else { 4792 memcpy(guest_xsave->region, 4793 &vcpu->arch.guest_fpu->state.fxsave, 4794 sizeof(struct fxregs_state)); 4795 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = 4796 XFEATURE_MASK_FPSSE; 4797 } 4798 } 4799 4800 #define XSAVE_MXCSR_OFFSET 24 4801 4802 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 4803 struct kvm_xsave *guest_xsave) 4804 { 4805 u64 xstate_bv; 4806 u32 mxcsr; 4807 4808 if (!vcpu->arch.guest_fpu) 4809 return 0; 4810 4811 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; 4812 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)]; 4813 4814 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 4815 /* 4816 * Here we allow setting states that are not present in 4817 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility 4818 * with old userspace. 
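		 * "Not present" is relative to the guest's CPUID only: the
		 * check below still rejects xstate_bv bits outside the
		 * host-supported supported_xcr0 and reserved MXCSR bits, so
		 * only state the host can actually hold is loaded.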
4819 */ 4820 if (xstate_bv & ~supported_xcr0 || mxcsr & ~mxcsr_feature_mask) 4821 return -EINVAL; 4822 load_xsave(vcpu, (u8 *)guest_xsave->region); 4823 } else { 4824 if (xstate_bv & ~XFEATURE_MASK_FPSSE || 4825 mxcsr & ~mxcsr_feature_mask) 4826 return -EINVAL; 4827 memcpy(&vcpu->arch.guest_fpu->state.fxsave, 4828 guest_xsave->region, sizeof(struct fxregs_state)); 4829 } 4830 return 0; 4831 } 4832 4833 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, 4834 struct kvm_xcrs *guest_xcrs) 4835 { 4836 if (!boot_cpu_has(X86_FEATURE_XSAVE)) { 4837 guest_xcrs->nr_xcrs = 0; 4838 return; 4839 } 4840 4841 guest_xcrs->nr_xcrs = 1; 4842 guest_xcrs->flags = 0; 4843 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; 4844 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; 4845 } 4846 4847 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, 4848 struct kvm_xcrs *guest_xcrs) 4849 { 4850 int i, r = 0; 4851 4852 if (!boot_cpu_has(X86_FEATURE_XSAVE)) 4853 return -EINVAL; 4854 4855 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) 4856 return -EINVAL; 4857 4858 for (i = 0; i < guest_xcrs->nr_xcrs; i++) 4859 /* Only support XCR0 currently */ 4860 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { 4861 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, 4862 guest_xcrs->xcrs[i].value); 4863 break; 4864 } 4865 if (r) 4866 r = -EINVAL; 4867 return r; 4868 } 4869 4870 /* 4871 * kvm_set_guest_paused() indicates to the guest kernel that it has been 4872 * stopped by the hypervisor. This function will be called from the host only. 4873 * EINVAL is returned when the host attempts to set the flag for a guest that 4874 * does not support pv clocks. 4875 */ 4876 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 4877 { 4878 if (!vcpu->arch.pv_time_enabled) 4879 return -EINVAL; 4880 vcpu->arch.pvclock_set_guest_stopped_request = true; 4881 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 4882 return 0; 4883 } 4884 4885 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 4886 struct kvm_enable_cap *cap) 4887 { 4888 int r; 4889 uint16_t vmcs_version; 4890 void __user *user_ptr; 4891 4892 if (cap->flags) 4893 return -EINVAL; 4894 4895 switch (cap->cap) { 4896 case KVM_CAP_HYPERV_SYNIC2: 4897 if (cap->args[0]) 4898 return -EINVAL; 4899 fallthrough; 4900 4901 case KVM_CAP_HYPERV_SYNIC: 4902 if (!irqchip_in_kernel(vcpu->kvm)) 4903 return -EINVAL; 4904 return kvm_hv_activate_synic(vcpu, cap->cap == 4905 KVM_CAP_HYPERV_SYNIC2); 4906 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 4907 if (!kvm_x86_ops.nested_ops->enable_evmcs) 4908 return -ENOTTY; 4909 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); 4910 if (!r) { 4911 user_ptr = (void __user *)(uintptr_t)cap->args[0]; 4912 if (copy_to_user(user_ptr, &vmcs_version, 4913 sizeof(vmcs_version))) 4914 r = -EFAULT; 4915 } 4916 return r; 4917 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 4918 if (!kvm_x86_ops.enable_direct_tlbflush) 4919 return -ENOTTY; 4920 4921 return static_call(kvm_x86_enable_direct_tlbflush)(vcpu); 4922 4923 case KVM_CAP_HYPERV_ENFORCE_CPUID: 4924 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); 4925 4926 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 4927 vcpu->arch.pv_cpuid.enforce = cap->args[0]; 4928 if (vcpu->arch.pv_cpuid.enforce) 4929 kvm_update_pv_runtime(vcpu); 4930 4931 return 0; 4932 default: 4933 return -EINVAL; 4934 } 4935 } 4936 4937 long kvm_arch_vcpu_ioctl(struct file *filp, 4938 unsigned int ioctl, unsigned long arg) 4939 { 4940 struct kvm_vcpu *vcpu = filp->private_data; 4941 void __user *argp = (void __user 
*)arg; 4942 int r; 4943 union { 4944 struct kvm_sregs2 *sregs2; 4945 struct kvm_lapic_state *lapic; 4946 struct kvm_xsave *xsave; 4947 struct kvm_xcrs *xcrs; 4948 void *buffer; 4949 } u; 4950 4951 vcpu_load(vcpu); 4952 4953 u.buffer = NULL; 4954 switch (ioctl) { 4955 case KVM_GET_LAPIC: { 4956 r = -EINVAL; 4957 if (!lapic_in_kernel(vcpu)) 4958 goto out; 4959 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), 4960 GFP_KERNEL_ACCOUNT); 4961 4962 r = -ENOMEM; 4963 if (!u.lapic) 4964 goto out; 4965 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); 4966 if (r) 4967 goto out; 4968 r = -EFAULT; 4969 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) 4970 goto out; 4971 r = 0; 4972 break; 4973 } 4974 case KVM_SET_LAPIC: { 4975 r = -EINVAL; 4976 if (!lapic_in_kernel(vcpu)) 4977 goto out; 4978 u.lapic = memdup_user(argp, sizeof(*u.lapic)); 4979 if (IS_ERR(u.lapic)) { 4980 r = PTR_ERR(u.lapic); 4981 goto out_nofree; 4982 } 4983 4984 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); 4985 break; 4986 } 4987 case KVM_INTERRUPT: { 4988 struct kvm_interrupt irq; 4989 4990 r = -EFAULT; 4991 if (copy_from_user(&irq, argp, sizeof(irq))) 4992 goto out; 4993 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 4994 break; 4995 } 4996 case KVM_NMI: { 4997 r = kvm_vcpu_ioctl_nmi(vcpu); 4998 break; 4999 } 5000 case KVM_SMI: { 5001 r = kvm_vcpu_ioctl_smi(vcpu); 5002 break; 5003 } 5004 case KVM_SET_CPUID: { 5005 struct kvm_cpuid __user *cpuid_arg = argp; 5006 struct kvm_cpuid cpuid; 5007 5008 r = -EFAULT; 5009 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5010 goto out; 5011 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); 5012 break; 5013 } 5014 case KVM_SET_CPUID2: { 5015 struct kvm_cpuid2 __user *cpuid_arg = argp; 5016 struct kvm_cpuid2 cpuid; 5017 5018 r = -EFAULT; 5019 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5020 goto out; 5021 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 5022 cpuid_arg->entries); 5023 break; 5024 } 5025 case KVM_GET_CPUID2: { 5026 struct kvm_cpuid2 __user *cpuid_arg = argp; 5027 struct kvm_cpuid2 cpuid; 5028 5029 r = -EFAULT; 5030 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5031 goto out; 5032 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 5033 cpuid_arg->entries); 5034 if (r) 5035 goto out; 5036 r = -EFAULT; 5037 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 5038 goto out; 5039 r = 0; 5040 break; 5041 } 5042 case KVM_GET_MSRS: { 5043 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5044 r = msr_io(vcpu, argp, do_get_msr, 1); 5045 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5046 break; 5047 } 5048 case KVM_SET_MSRS: { 5049 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5050 r = msr_io(vcpu, argp, do_set_msr, 0); 5051 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5052 break; 5053 } 5054 case KVM_TPR_ACCESS_REPORTING: { 5055 struct kvm_tpr_access_ctl tac; 5056 5057 r = -EFAULT; 5058 if (copy_from_user(&tac, argp, sizeof(tac))) 5059 goto out; 5060 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 5061 if (r) 5062 goto out; 5063 r = -EFAULT; 5064 if (copy_to_user(argp, &tac, sizeof(tac))) 5065 goto out; 5066 r = 0; 5067 break; 5068 }; 5069 case KVM_SET_VAPIC_ADDR: { 5070 struct kvm_vapic_addr va; 5071 int idx; 5072 5073 r = -EINVAL; 5074 if (!lapic_in_kernel(vcpu)) 5075 goto out; 5076 r = -EFAULT; 5077 if (copy_from_user(&va, argp, sizeof(va))) 5078 goto out; 5079 idx = srcu_read_lock(&vcpu->kvm->srcu); 5080 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 5081 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5082 break; 5083 } 5084 case KVM_X86_SETUP_MCE: { 5085 u64 mcg_cap; 
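		/*
		 * mcg_cap mirrors IA32_MCG_CAP: bits 7:0 give the MCE bank
		 * count and bits 23:16 the extended register count; any
		 * other bit must be a subset of kvm_mce_cap_supported.
		 * This is validated by kvm_vcpu_ioctl_x86_setup_mce()
		 * before being applied.
		 */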
5086 5087 r = -EFAULT; 5088 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap))) 5089 goto out; 5090 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 5091 break; 5092 } 5093 case KVM_X86_SET_MCE: { 5094 struct kvm_x86_mce mce; 5095 5096 r = -EFAULT; 5097 if (copy_from_user(&mce, argp, sizeof(mce))) 5098 goto out; 5099 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 5100 break; 5101 } 5102 case KVM_GET_VCPU_EVENTS: { 5103 struct kvm_vcpu_events events; 5104 5105 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); 5106 5107 r = -EFAULT; 5108 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) 5109 break; 5110 r = 0; 5111 break; 5112 } 5113 case KVM_SET_VCPU_EVENTS: { 5114 struct kvm_vcpu_events events; 5115 5116 r = -EFAULT; 5117 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) 5118 break; 5119 5120 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); 5121 break; 5122 } 5123 case KVM_GET_DEBUGREGS: { 5124 struct kvm_debugregs dbgregs; 5125 5126 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); 5127 5128 r = -EFAULT; 5129 if (copy_to_user(argp, &dbgregs, 5130 sizeof(struct kvm_debugregs))) 5131 break; 5132 r = 0; 5133 break; 5134 } 5135 case KVM_SET_DEBUGREGS: { 5136 struct kvm_debugregs dbgregs; 5137 5138 r = -EFAULT; 5139 if (copy_from_user(&dbgregs, argp, 5140 sizeof(struct kvm_debugregs))) 5141 break; 5142 5143 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); 5144 break; 5145 } 5146 case KVM_GET_XSAVE: { 5147 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT); 5148 r = -ENOMEM; 5149 if (!u.xsave) 5150 break; 5151 5152 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); 5153 5154 r = -EFAULT; 5155 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) 5156 break; 5157 r = 0; 5158 break; 5159 } 5160 case KVM_SET_XSAVE: { 5161 u.xsave = memdup_user(argp, sizeof(*u.xsave)); 5162 if (IS_ERR(u.xsave)) { 5163 r = PTR_ERR(u.xsave); 5164 goto out_nofree; 5165 } 5166 5167 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); 5168 break; 5169 } 5170 case KVM_GET_XCRS: { 5171 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT); 5172 r = -ENOMEM; 5173 if (!u.xcrs) 5174 break; 5175 5176 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); 5177 5178 r = -EFAULT; 5179 if (copy_to_user(argp, u.xcrs, 5180 sizeof(struct kvm_xcrs))) 5181 break; 5182 r = 0; 5183 break; 5184 } 5185 case KVM_SET_XCRS: { 5186 u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); 5187 if (IS_ERR(u.xcrs)) { 5188 r = PTR_ERR(u.xcrs); 5189 goto out_nofree; 5190 } 5191 5192 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); 5193 break; 5194 } 5195 case KVM_SET_TSC_KHZ: { 5196 u32 user_tsc_khz; 5197 5198 r = -EINVAL; 5199 user_tsc_khz = (u32)arg; 5200 5201 if (kvm_has_tsc_control && 5202 user_tsc_khz >= kvm_max_guest_tsc_khz) 5203 goto out; 5204 5205 if (user_tsc_khz == 0) 5206 user_tsc_khz = tsc_khz; 5207 5208 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) 5209 r = 0; 5210 5211 goto out; 5212 } 5213 case KVM_GET_TSC_KHZ: { 5214 r = vcpu->arch.virtual_tsc_khz; 5215 goto out; 5216 } 5217 case KVM_KVMCLOCK_CTRL: { 5218 r = kvm_set_guest_paused(vcpu); 5219 goto out; 5220 } 5221 case KVM_ENABLE_CAP: { 5222 struct kvm_enable_cap cap; 5223 5224 r = -EFAULT; 5225 if (copy_from_user(&cap, argp, sizeof(cap))) 5226 goto out; 5227 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 5228 break; 5229 } 5230 case KVM_GET_NESTED_STATE: { 5231 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5232 u32 user_data_size; 5233 5234 r = -EINVAL; 5235 if (!kvm_x86_ops.nested_ops->get_state) 5236 break; 5237 5238 
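		/*
		 * Size negotiation: userspace passes the capacity of its
		 * buffer in ->size.  If the saved nested state is larger,
		 * the required size is written back and -E2BIG is returned
		 * (the r > user_data_size path below), so the caller can
		 * retry with a bigger buffer.
		 */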
BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); 5239 r = -EFAULT; 5240 if (get_user(user_data_size, &user_kvm_nested_state->size)) 5241 break; 5242 5243 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, 5244 user_data_size); 5245 if (r < 0) 5246 break; 5247 5248 if (r > user_data_size) { 5249 if (put_user(r, &user_kvm_nested_state->size)) 5250 r = -EFAULT; 5251 else 5252 r = -E2BIG; 5253 break; 5254 } 5255 5256 r = 0; 5257 break; 5258 } 5259 case KVM_SET_NESTED_STATE: { 5260 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5261 struct kvm_nested_state kvm_state; 5262 int idx; 5263 5264 r = -EINVAL; 5265 if (!kvm_x86_ops.nested_ops->set_state) 5266 break; 5267 5268 r = -EFAULT; 5269 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) 5270 break; 5271 5272 r = -EINVAL; 5273 if (kvm_state.size < sizeof(kvm_state)) 5274 break; 5275 5276 if (kvm_state.flags & 5277 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE 5278 | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING 5279 | KVM_STATE_NESTED_GIF_SET)) 5280 break; 5281 5282 /* nested_run_pending implies guest_mode. */ 5283 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING) 5284 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) 5285 break; 5286 5287 idx = srcu_read_lock(&vcpu->kvm->srcu); 5288 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); 5289 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5290 break; 5291 } 5292 case KVM_GET_SUPPORTED_HV_CPUID: 5293 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); 5294 break; 5295 #ifdef CONFIG_KVM_XEN 5296 case KVM_XEN_VCPU_GET_ATTR: { 5297 struct kvm_xen_vcpu_attr xva; 5298 5299 r = -EFAULT; 5300 if (copy_from_user(&xva, argp, sizeof(xva))) 5301 goto out; 5302 r = kvm_xen_vcpu_get_attr(vcpu, &xva); 5303 if (!r && copy_to_user(argp, &xva, sizeof(xva))) 5304 r = -EFAULT; 5305 break; 5306 } 5307 case KVM_XEN_VCPU_SET_ATTR: { 5308 struct kvm_xen_vcpu_attr xva; 5309 5310 r = -EFAULT; 5311 if (copy_from_user(&xva, argp, sizeof(xva))) 5312 goto out; 5313 r = kvm_xen_vcpu_set_attr(vcpu, &xva); 5314 break; 5315 } 5316 #endif 5317 case KVM_GET_SREGS2: { 5318 u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL); 5319 r = -ENOMEM; 5320 if (!u.sregs2) 5321 goto out; 5322 __get_sregs2(vcpu, u.sregs2); 5323 r = -EFAULT; 5324 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2))) 5325 goto out; 5326 r = 0; 5327 break; 5328 } 5329 case KVM_SET_SREGS2: { 5330 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2)); 5331 if (IS_ERR(u.sregs2)) { 5332 r = PTR_ERR(u.sregs2); 5333 u.sregs2 = NULL; 5334 goto out; 5335 } 5336 r = __set_sregs2(vcpu, u.sregs2); 5337 break; 5338 } 5339 default: 5340 r = -EINVAL; 5341 } 5342 out: 5343 kfree(u.buffer); 5344 out_nofree: 5345 vcpu_put(vcpu); 5346 return r; 5347 } 5348 5349 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 5350 { 5351 return VM_FAULT_SIGBUS; 5352 } 5353 5354 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) 5355 { 5356 int ret; 5357 5358 if (addr > (unsigned int)(-3 * PAGE_SIZE)) 5359 return -EINVAL; 5360 ret = static_call(kvm_x86_set_tss_addr)(kvm, addr); 5361 return ret; 5362 } 5363 5364 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, 5365 u64 ident_addr) 5366 { 5367 return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr); 5368 } 5369 5370 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, 5371 unsigned long kvm_nr_mmu_pages) 5372 { 5373 if 
(kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) 5374 return -EINVAL; 5375 5376 mutex_lock(&kvm->slots_lock); 5377 5378 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); 5379 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; 5380 5381 mutex_unlock(&kvm->slots_lock); 5382 return 0; 5383 } 5384 5385 static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) 5386 { 5387 return kvm->arch.n_max_mmu_pages; 5388 } 5389 5390 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 5391 { 5392 struct kvm_pic *pic = kvm->arch.vpic; 5393 int r; 5394 5395 r = 0; 5396 switch (chip->chip_id) { 5397 case KVM_IRQCHIP_PIC_MASTER: 5398 memcpy(&chip->chip.pic, &pic->pics[0], 5399 sizeof(struct kvm_pic_state)); 5400 break; 5401 case KVM_IRQCHIP_PIC_SLAVE: 5402 memcpy(&chip->chip.pic, &pic->pics[1], 5403 sizeof(struct kvm_pic_state)); 5404 break; 5405 case KVM_IRQCHIP_IOAPIC: 5406 kvm_get_ioapic(kvm, &chip->chip.ioapic); 5407 break; 5408 default: 5409 r = -EINVAL; 5410 break; 5411 } 5412 return r; 5413 } 5414 5415 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 5416 { 5417 struct kvm_pic *pic = kvm->arch.vpic; 5418 int r; 5419 5420 r = 0; 5421 switch (chip->chip_id) { 5422 case KVM_IRQCHIP_PIC_MASTER: 5423 spin_lock(&pic->lock); 5424 memcpy(&pic->pics[0], &chip->chip.pic, 5425 sizeof(struct kvm_pic_state)); 5426 spin_unlock(&pic->lock); 5427 break; 5428 case KVM_IRQCHIP_PIC_SLAVE: 5429 spin_lock(&pic->lock); 5430 memcpy(&pic->pics[1], &chip->chip.pic, 5431 sizeof(struct kvm_pic_state)); 5432 spin_unlock(&pic->lock); 5433 break; 5434 case KVM_IRQCHIP_IOAPIC: 5435 kvm_set_ioapic(kvm, &chip->chip.ioapic); 5436 break; 5437 default: 5438 r = -EINVAL; 5439 break; 5440 } 5441 kvm_pic_update_irq(pic); 5442 return r; 5443 } 5444 5445 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) 5446 { 5447 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; 5448 5449 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); 5450 5451 mutex_lock(&kps->lock); 5452 memcpy(ps, &kps->channels, sizeof(*ps)); 5453 mutex_unlock(&kps->lock); 5454 return 0; 5455 } 5456 5457 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 5458 { 5459 int i; 5460 struct kvm_pit *pit = kvm->arch.vpit; 5461 5462 mutex_lock(&pit->pit_state.lock); 5463 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); 5464 for (i = 0; i < 3; i++) 5465 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); 5466 mutex_unlock(&pit->pit_state.lock); 5467 return 0; 5468 } 5469 5470 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 5471 { 5472 mutex_lock(&kvm->arch.vpit->pit_state.lock); 5473 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, 5474 sizeof(ps->channels)); 5475 ps->flags = kvm->arch.vpit->pit_state.flags; 5476 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 5477 memset(&ps->reserved, 0, sizeof(ps->reserved)); 5478 return 0; 5479 } 5480 5481 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 5482 { 5483 int start = 0; 5484 int i; 5485 u32 prev_legacy, cur_legacy; 5486 struct kvm_pit *pit = kvm->arch.vpit; 5487 5488 mutex_lock(&pit->pit_state.lock); 5489 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 5490 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; 5491 if (!prev_legacy && cur_legacy) 5492 start = 1; 5493 memcpy(&pit->pit_state.channels, &ps->channels, 5494 sizeof(pit->pit_state.channels)); 5495 pit->pit_state.flags = ps->flags; 5496 for (i = 0; i < 3; i++) 5497 
kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, 5498 start && i == 0); 5499 mutex_unlock(&pit->pit_state.lock); 5500 return 0; 5501 } 5502 5503 static int kvm_vm_ioctl_reinject(struct kvm *kvm, 5504 struct kvm_reinject_control *control) 5505 { 5506 struct kvm_pit *pit = kvm->arch.vpit; 5507 5508 /* pit->pit_state.lock was overloaded to prevent userspace from getting 5509 * an inconsistent state after running multiple KVM_REINJECT_CONTROL 5510 * ioctls in parallel. Use a separate lock if that ioctl isn't rare. 5511 */ 5512 mutex_lock(&pit->pit_state.lock); 5513 kvm_pit_set_reinject(pit, control->pit_reinject); 5514 mutex_unlock(&pit->pit_state.lock); 5515 5516 return 0; 5517 } 5518 5519 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) 5520 { 5521 5522 /* 5523 * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called 5524 * before reporting dirty_bitmap to userspace. KVM flushes the buffers 5525 * on all VM-Exits, thus we only need to kick running vCPUs to force a 5526 * VM-Exit. 5527 */ 5528 struct kvm_vcpu *vcpu; 5529 int i; 5530 5531 kvm_for_each_vcpu(i, vcpu, kvm) 5532 kvm_vcpu_kick(vcpu); 5533 } 5534 5535 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, 5536 bool line_status) 5537 { 5538 if (!irqchip_in_kernel(kvm)) 5539 return -ENXIO; 5540 5541 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 5542 irq_event->irq, irq_event->level, 5543 line_status); 5544 return 0; 5545 } 5546 5547 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, 5548 struct kvm_enable_cap *cap) 5549 { 5550 int r; 5551 5552 if (cap->flags) 5553 return -EINVAL; 5554 5555 switch (cap->cap) { 5556 case KVM_CAP_DISABLE_QUIRKS: 5557 kvm->arch.disabled_quirks = cap->args[0]; 5558 r = 0; 5559 break; 5560 case KVM_CAP_SPLIT_IRQCHIP: { 5561 mutex_lock(&kvm->lock); 5562 r = -EINVAL; 5563 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) 5564 goto split_irqchip_unlock; 5565 r = -EEXIST; 5566 if (irqchip_in_kernel(kvm)) 5567 goto split_irqchip_unlock; 5568 if (kvm->created_vcpus) 5569 goto split_irqchip_unlock; 5570 r = kvm_setup_empty_irq_routing(kvm); 5571 if (r) 5572 goto split_irqchip_unlock; 5573 /* Pairs with irqchip_in_kernel. 
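		 * The smp_wmb() below orders the routing-table setup above
		 * against the write of irqchip_mode, so any reader that
		 * observes irqchip_in_kernel() as true also sees the (empty)
		 * routing table it relies on.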
*/ 5574 smp_wmb(); 5575 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; 5576 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; 5577 r = 0; 5578 split_irqchip_unlock: 5579 mutex_unlock(&kvm->lock); 5580 break; 5581 } 5582 case KVM_CAP_X2APIC_API: 5583 r = -EINVAL; 5584 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) 5585 break; 5586 5587 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) 5588 kvm->arch.x2apic_format = true; 5589 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) 5590 kvm->arch.x2apic_broadcast_quirk_disabled = true; 5591 5592 r = 0; 5593 break; 5594 case KVM_CAP_X86_DISABLE_EXITS: 5595 r = -EINVAL; 5596 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) 5597 break; 5598 5599 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && 5600 kvm_can_mwait_in_guest()) 5601 kvm->arch.mwait_in_guest = true; 5602 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) 5603 kvm->arch.hlt_in_guest = true; 5604 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) 5605 kvm->arch.pause_in_guest = true; 5606 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) 5607 kvm->arch.cstate_in_guest = true; 5608 r = 0; 5609 break; 5610 case KVM_CAP_MSR_PLATFORM_INFO: 5611 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; 5612 r = 0; 5613 break; 5614 case KVM_CAP_EXCEPTION_PAYLOAD: 5615 kvm->arch.exception_payload_enabled = cap->args[0]; 5616 r = 0; 5617 break; 5618 case KVM_CAP_X86_USER_SPACE_MSR: 5619 kvm->arch.user_space_msr_mask = cap->args[0]; 5620 r = 0; 5621 break; 5622 case KVM_CAP_X86_BUS_LOCK_EXIT: 5623 r = -EINVAL; 5624 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) 5625 break; 5626 5627 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && 5628 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) 5629 break; 5630 5631 if (kvm_has_bus_lock_exit && 5632 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) 5633 kvm->arch.bus_lock_detection_enabled = true; 5634 r = 0; 5635 break; 5636 #ifdef CONFIG_X86_SGX_KVM 5637 case KVM_CAP_SGX_ATTRIBUTE: { 5638 unsigned long allowed_attributes = 0; 5639 5640 r = sgx_set_attribute(&allowed_attributes, cap->args[0]); 5641 if (r) 5642 break; 5643 5644 /* KVM only supports the PROVISIONKEY privileged attribute. 
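		 * args[0] is a file descriptor for an SGX attribute file;
		 * sgx_set_attribute() above translates it into the attribute
		 * mask, and anything other than exactly PROVISIONKEY falls
		 * through to the -EINVAL branch below.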
*/ 5645 if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) && 5646 !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY)) 5647 kvm->arch.sgx_provisioning_allowed = true; 5648 else 5649 r = -EINVAL; 5650 break; 5651 } 5652 #endif 5653 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 5654 r = -EINVAL; 5655 if (kvm_x86_ops.vm_copy_enc_context_from) 5656 r = kvm_x86_ops.vm_copy_enc_context_from(kvm, cap->args[0]); 5657 return r; 5658 case KVM_CAP_EXIT_HYPERCALL: 5659 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) { 5660 r = -EINVAL; 5661 break; 5662 } 5663 kvm->arch.hypercall_exit_enabled = cap->args[0]; 5664 r = 0; 5665 break; 5666 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 5667 r = -EINVAL; 5668 if (cap->args[0] & ~1) 5669 break; 5670 kvm->arch.exit_on_emulation_error = cap->args[0]; 5671 r = 0; 5672 break; 5673 default: 5674 r = -EINVAL; 5675 break; 5676 } 5677 return r; 5678 } 5679 5680 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow) 5681 { 5682 struct kvm_x86_msr_filter *msr_filter; 5683 5684 msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT); 5685 if (!msr_filter) 5686 return NULL; 5687 5688 msr_filter->default_allow = default_allow; 5689 return msr_filter; 5690 } 5691 5692 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter) 5693 { 5694 u32 i; 5695 5696 if (!msr_filter) 5697 return; 5698 5699 for (i = 0; i < msr_filter->count; i++) 5700 kfree(msr_filter->ranges[i].bitmap); 5701 5702 kfree(msr_filter); 5703 } 5704 5705 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, 5706 struct kvm_msr_filter_range *user_range) 5707 { 5708 unsigned long *bitmap = NULL; 5709 size_t bitmap_size; 5710 5711 if (!user_range->nmsrs) 5712 return 0; 5713 5714 if (user_range->flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)) 5715 return -EINVAL; 5716 5717 if (!user_range->flags) 5718 return -EINVAL; 5719 5720 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); 5721 if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE) 5722 return -EINVAL; 5723 5724 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); 5725 if (IS_ERR(bitmap)) 5726 return PTR_ERR(bitmap); 5727 5728 msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) { 5729 .flags = user_range->flags, 5730 .base = user_range->base, 5731 .nmsrs = user_range->nmsrs, 5732 .bitmap = bitmap, 5733 }; 5734 5735 msr_filter->count++; 5736 return 0; 5737 } 5738 5739 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) 5740 { 5741 struct kvm_msr_filter __user *user_msr_filter = argp; 5742 struct kvm_x86_msr_filter *new_filter, *old_filter; 5743 struct kvm_msr_filter filter; 5744 bool default_allow; 5745 bool empty = true; 5746 int r = 0; 5747 u32 i; 5748 5749 if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) 5750 return -EFAULT; 5751 5752 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) 5753 empty &= !filter.ranges[i].nmsrs; 5754 5755 default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY); 5756 if (empty && !default_allow) 5757 return -EINVAL; 5758 5759 new_filter = kvm_alloc_msr_filter(default_allow); 5760 if (!new_filter) 5761 return -ENOMEM; 5762 5763 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { 5764 r = kvm_add_msr_filter(new_filter, &filter.ranges[i]); 5765 if (r) { 5766 kvm_free_msr_filter(new_filter); 5767 return r; 5768 } 5769 } 5770 5771 mutex_lock(&kvm->lock); 5772 5773 /* The per-VM filter is protected by kvm->lock... 
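	 * Writers serialize on that lock; readers walk the filter under
	 * SRCU.  That is why the new filter is published with
	 * rcu_assign_pointer() and the old one is only freed after
	 * synchronize_srcu() below has waited out in-flight readers.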
*/ 5774 old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); 5775 5776 rcu_assign_pointer(kvm->arch.msr_filter, new_filter); 5777 synchronize_srcu(&kvm->srcu); 5778 5779 kvm_free_msr_filter(old_filter); 5780 5781 kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED); 5782 mutex_unlock(&kvm->lock); 5783 5784 return 0; 5785 } 5786 5787 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 5788 static int kvm_arch_suspend_notifier(struct kvm *kvm) 5789 { 5790 struct kvm_vcpu *vcpu; 5791 int i, ret = 0; 5792 5793 mutex_lock(&kvm->lock); 5794 kvm_for_each_vcpu(i, vcpu, kvm) { 5795 if (!vcpu->arch.pv_time_enabled) 5796 continue; 5797 5798 ret = kvm_set_guest_paused(vcpu); 5799 if (ret) { 5800 kvm_err("Failed to pause guest VCPU%d: %d\n", 5801 vcpu->vcpu_id, ret); 5802 break; 5803 } 5804 } 5805 mutex_unlock(&kvm->lock); 5806 5807 return ret ? NOTIFY_BAD : NOTIFY_DONE; 5808 } 5809 5810 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state) 5811 { 5812 switch (state) { 5813 case PM_HIBERNATION_PREPARE: 5814 case PM_SUSPEND_PREPARE: 5815 return kvm_arch_suspend_notifier(kvm); 5816 } 5817 5818 return NOTIFY_DONE; 5819 } 5820 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 5821 5822 long kvm_arch_vm_ioctl(struct file *filp, 5823 unsigned int ioctl, unsigned long arg) 5824 { 5825 struct kvm *kvm = filp->private_data; 5826 void __user *argp = (void __user *)arg; 5827 int r = -ENOTTY; 5828 /* 5829 * This union makes it completely explicit to gcc-3.x 5830 * that these two variables' stack usage should be 5831 * combined, not added together. 5832 */ 5833 union { 5834 struct kvm_pit_state ps; 5835 struct kvm_pit_state2 ps2; 5836 struct kvm_pit_config pit_config; 5837 } u; 5838 5839 switch (ioctl) { 5840 case KVM_SET_TSS_ADDR: 5841 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); 5842 break; 5843 case KVM_SET_IDENTITY_MAP_ADDR: { 5844 u64 ident_addr; 5845 5846 mutex_lock(&kvm->lock); 5847 r = -EINVAL; 5848 if (kvm->created_vcpus) 5849 goto set_identity_unlock; 5850 r = -EFAULT; 5851 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr))) 5852 goto set_identity_unlock; 5853 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 5854 set_identity_unlock: 5855 mutex_unlock(&kvm->lock); 5856 break; 5857 } 5858 case KVM_SET_NR_MMU_PAGES: 5859 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); 5860 break; 5861 case KVM_GET_NR_MMU_PAGES: 5862 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); 5863 break; 5864 case KVM_CREATE_IRQCHIP: { 5865 mutex_lock(&kvm->lock); 5866 5867 r = -EEXIST; 5868 if (irqchip_in_kernel(kvm)) 5869 goto create_irqchip_unlock; 5870 5871 r = -EINVAL; 5872 if (kvm->created_vcpus) 5873 goto create_irqchip_unlock; 5874 5875 r = kvm_pic_init(kvm); 5876 if (r) 5877 goto create_irqchip_unlock; 5878 5879 r = kvm_ioapic_init(kvm); 5880 if (r) { 5881 kvm_pic_destroy(kvm); 5882 goto create_irqchip_unlock; 5883 } 5884 5885 r = kvm_setup_default_irq_routing(kvm); 5886 if (r) { 5887 kvm_ioapic_destroy(kvm); 5888 kvm_pic_destroy(kvm); 5889 goto create_irqchip_unlock; 5890 } 5891 /* Write kvm->irq_routing before enabling irqchip_in_kernel. 
*/ 5892 smp_wmb(); 5893 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; 5894 create_irqchip_unlock: 5895 mutex_unlock(&kvm->lock); 5896 break; 5897 } 5898 case KVM_CREATE_PIT: 5899 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; 5900 goto create_pit; 5901 case KVM_CREATE_PIT2: 5902 r = -EFAULT; 5903 if (copy_from_user(&u.pit_config, argp, 5904 sizeof(struct kvm_pit_config))) 5905 goto out; 5906 create_pit: 5907 mutex_lock(&kvm->lock); 5908 r = -EEXIST; 5909 if (kvm->arch.vpit) 5910 goto create_pit_unlock; 5911 r = -ENOMEM; 5912 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); 5913 if (kvm->arch.vpit) 5914 r = 0; 5915 create_pit_unlock: 5916 mutex_unlock(&kvm->lock); 5917 break; 5918 case KVM_GET_IRQCHIP: { 5919 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 5920 struct kvm_irqchip *chip; 5921 5922 chip = memdup_user(argp, sizeof(*chip)); 5923 if (IS_ERR(chip)) { 5924 r = PTR_ERR(chip); 5925 goto out; 5926 } 5927 5928 r = -ENXIO; 5929 if (!irqchip_kernel(kvm)) 5930 goto get_irqchip_out; 5931 r = kvm_vm_ioctl_get_irqchip(kvm, chip); 5932 if (r) 5933 goto get_irqchip_out; 5934 r = -EFAULT; 5935 if (copy_to_user(argp, chip, sizeof(*chip))) 5936 goto get_irqchip_out; 5937 r = 0; 5938 get_irqchip_out: 5939 kfree(chip); 5940 break; 5941 } 5942 case KVM_SET_IRQCHIP: { 5943 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 5944 struct kvm_irqchip *chip; 5945 5946 chip = memdup_user(argp, sizeof(*chip)); 5947 if (IS_ERR(chip)) { 5948 r = PTR_ERR(chip); 5949 goto out; 5950 } 5951 5952 r = -ENXIO; 5953 if (!irqchip_kernel(kvm)) 5954 goto set_irqchip_out; 5955 r = kvm_vm_ioctl_set_irqchip(kvm, chip); 5956 set_irqchip_out: 5957 kfree(chip); 5958 break; 5959 } 5960 case KVM_GET_PIT: { 5961 r = -EFAULT; 5962 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) 5963 goto out; 5964 r = -ENXIO; 5965 if (!kvm->arch.vpit) 5966 goto out; 5967 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); 5968 if (r) 5969 goto out; 5970 r = -EFAULT; 5971 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) 5972 goto out; 5973 r = 0; 5974 break; 5975 } 5976 case KVM_SET_PIT: { 5977 r = -EFAULT; 5978 if (copy_from_user(&u.ps, argp, sizeof(u.ps))) 5979 goto out; 5980 mutex_lock(&kvm->lock); 5981 r = -ENXIO; 5982 if (!kvm->arch.vpit) 5983 goto set_pit_out; 5984 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); 5985 set_pit_out: 5986 mutex_unlock(&kvm->lock); 5987 break; 5988 } 5989 case KVM_GET_PIT2: { 5990 r = -ENXIO; 5991 if (!kvm->arch.vpit) 5992 goto out; 5993 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); 5994 if (r) 5995 goto out; 5996 r = -EFAULT; 5997 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) 5998 goto out; 5999 r = 0; 6000 break; 6001 } 6002 case KVM_SET_PIT2: { 6003 r = -EFAULT; 6004 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) 6005 goto out; 6006 mutex_lock(&kvm->lock); 6007 r = -ENXIO; 6008 if (!kvm->arch.vpit) 6009 goto set_pit2_out; 6010 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); 6011 set_pit2_out: 6012 mutex_unlock(&kvm->lock); 6013 break; 6014 } 6015 case KVM_REINJECT_CONTROL: { 6016 struct kvm_reinject_control control; 6017 r = -EFAULT; 6018 if (copy_from_user(&control, argp, sizeof(control))) 6019 goto out; 6020 r = -ENXIO; 6021 if (!kvm->arch.vpit) 6022 goto out; 6023 r = kvm_vm_ioctl_reinject(kvm, &control); 6024 break; 6025 } 6026 case KVM_SET_BOOT_CPU_ID: 6027 r = 0; 6028 mutex_lock(&kvm->lock); 6029 if (kvm->created_vcpus) 6030 r = -EBUSY; 6031 else 6032 kvm->arch.bsp_vcpu_id = arg; 6033 mutex_unlock(&kvm->lock); 6034 break; 6035 #ifdef CONFIG_KVM_XEN 6036 case KVM_XEN_HVM_CONFIG: { 6037 struct kvm_xen_hvm_config xhc; 
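		/* Copy the Xen HVM config from userspace and hand it to kvm_xen_hvm_config() for validation. */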
6038 r = -EFAULT; 6039 if (copy_from_user(&xhc, argp, sizeof(xhc))) 6040 goto out; 6041 r = kvm_xen_hvm_config(kvm, &xhc); 6042 break; 6043 } 6044 case KVM_XEN_HVM_GET_ATTR: { 6045 struct kvm_xen_hvm_attr xha; 6046 6047 r = -EFAULT; 6048 if (copy_from_user(&xha, argp, sizeof(xha))) 6049 goto out; 6050 r = kvm_xen_hvm_get_attr(kvm, &xha); 6051 if (!r && copy_to_user(argp, &xha, sizeof(xha))) 6052 r = -EFAULT; 6053 break; 6054 } 6055 case KVM_XEN_HVM_SET_ATTR: { 6056 struct kvm_xen_hvm_attr xha; 6057 6058 r = -EFAULT; 6059 if (copy_from_user(&xha, argp, sizeof(xha))) 6060 goto out; 6061 r = kvm_xen_hvm_set_attr(kvm, &xha); 6062 break; 6063 } 6064 #endif 6065 case KVM_SET_CLOCK: { 6066 struct kvm_arch *ka = &kvm->arch; 6067 struct kvm_clock_data user_ns; 6068 u64 now_ns; 6069 6070 r = -EFAULT; 6071 if (copy_from_user(&user_ns, argp, sizeof(user_ns))) 6072 goto out; 6073 6074 r = -EINVAL; 6075 if (user_ns.flags) 6076 goto out; 6077 6078 r = 0; 6079 /* 6080 * TODO: userspace has to take care of races with VCPU_RUN, so 6081 * kvm_gen_update_masterclock() can be cut down to locked 6082 * pvclock_update_vm_gtod_copy(). 6083 */ 6084 kvm_gen_update_masterclock(kvm); 6085 6086 /* 6087 * This pairs with kvm_guest_time_update(): when masterclock is 6088 * in use, we use master_kernel_ns + kvmclock_offset to set 6089 * unsigned 'system_time' so if we use get_kvmclock_ns() (which 6090 * is slightly ahead) here we risk going negative on unsigned 6091 * 'system_time' when 'user_ns.clock' is very small. 6092 */ 6093 spin_lock_irq(&ka->pvclock_gtod_sync_lock); 6094 if (kvm->arch.use_master_clock) 6095 now_ns = ka->master_kernel_ns; 6096 else 6097 now_ns = get_kvmclock_base_ns(); 6098 ka->kvmclock_offset = user_ns.clock - now_ns; 6099 spin_unlock_irq(&ka->pvclock_gtod_sync_lock); 6100 6101 kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE); 6102 break; 6103 } 6104 case KVM_GET_CLOCK: { 6105 struct kvm_clock_data user_ns; 6106 u64 now_ns; 6107 6108 now_ns = get_kvmclock_ns(kvm); 6109 user_ns.clock = now_ns; 6110 user_ns.flags = kvm->arch.use_master_clock ? 
KVM_CLOCK_TSC_STABLE : 0; 6111 memset(&user_ns.pad, 0, sizeof(user_ns.pad)); 6112 6113 r = -EFAULT; 6114 if (copy_to_user(argp, &user_ns, sizeof(user_ns))) 6115 goto out; 6116 r = 0; 6117 break; 6118 } 6119 case KVM_MEMORY_ENCRYPT_OP: { 6120 r = -ENOTTY; 6121 if (kvm_x86_ops.mem_enc_op) 6122 r = static_call(kvm_x86_mem_enc_op)(kvm, argp); 6123 break; 6124 } 6125 case KVM_MEMORY_ENCRYPT_REG_REGION: { 6126 struct kvm_enc_region region; 6127 6128 r = -EFAULT; 6129 if (copy_from_user(&region, argp, sizeof(region))) 6130 goto out; 6131 6132 r = -ENOTTY; 6133 if (kvm_x86_ops.mem_enc_reg_region) 6134 r = static_call(kvm_x86_mem_enc_reg_region)(kvm, &region); 6135 break; 6136 } 6137 case KVM_MEMORY_ENCRYPT_UNREG_REGION: { 6138 struct kvm_enc_region region; 6139 6140 r = -EFAULT; 6141 if (copy_from_user(&region, argp, sizeof(region))) 6142 goto out; 6143 6144 r = -ENOTTY; 6145 if (kvm_x86_ops.mem_enc_unreg_region) 6146 r = static_call(kvm_x86_mem_enc_unreg_region)(kvm, &region); 6147 break; 6148 } 6149 case KVM_HYPERV_EVENTFD: { 6150 struct kvm_hyperv_eventfd hvevfd; 6151 6152 r = -EFAULT; 6153 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd))) 6154 goto out; 6155 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd); 6156 break; 6157 } 6158 case KVM_SET_PMU_EVENT_FILTER: 6159 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); 6160 break; 6161 case KVM_X86_SET_MSR_FILTER: 6162 r = kvm_vm_ioctl_set_msr_filter(kvm, argp); 6163 break; 6164 default: 6165 r = -ENOTTY; 6166 } 6167 out: 6168 return r; 6169 } 6170 6171 static void kvm_init_msr_list(void) 6172 { 6173 struct x86_pmu_capability x86_pmu; 6174 u32 dummy[2]; 6175 unsigned i; 6176 6177 BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4, 6178 "Please update the fixed PMCs in msrs_to_save_all[]"); 6179 6180 perf_get_x86_pmu_capability(&x86_pmu); 6181 6182 num_msrs_to_save = 0; 6183 num_emulated_msrs = 0; 6184 num_msr_based_features = 0; 6185 6186 for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) { 6187 if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0) 6188 continue; 6189 6190 /* 6191 * Even MSRs that are valid in the host may not be exposed 6192 * to the guests in some cases. 6193 */ 6194 switch (msrs_to_save_all[i]) { 6195 case MSR_IA32_BNDCFGS: 6196 if (!kvm_mpx_supported()) 6197 continue; 6198 break; 6199 case MSR_TSC_AUX: 6200 if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) && 6201 !kvm_cpu_cap_has(X86_FEATURE_RDPID)) 6202 continue; 6203 break; 6204 case MSR_IA32_UMWAIT_CONTROL: 6205 if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) 6206 continue; 6207 break; 6208 case MSR_IA32_RTIT_CTL: 6209 case MSR_IA32_RTIT_STATUS: 6210 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) 6211 continue; 6212 break; 6213 case MSR_IA32_RTIT_CR3_MATCH: 6214 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 6215 !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering)) 6216 continue; 6217 break; 6218 case MSR_IA32_RTIT_OUTPUT_BASE: 6219 case MSR_IA32_RTIT_OUTPUT_MASK: 6220 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 6221 (!intel_pt_validate_hw_cap(PT_CAP_topa_output) && 6222 !intel_pt_validate_hw_cap(PT_CAP_single_range_output))) 6223 continue; 6224 break; 6225 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 6226 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 6227 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >= 6228 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2) 6229 continue; 6230 break; 6231 case MSR_ARCH_PERFMON_PERFCTR0 ...
MSR_ARCH_PERFMON_PERFCTR0 + 17: 6232 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= 6233 min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp)) 6234 continue; 6235 break; 6236 case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17: 6237 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= 6238 min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp)) 6239 continue; 6240 break; 6241 default: 6242 break; 6243 } 6244 6245 msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i]; 6246 } 6247 6248 for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { 6249 if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i])) 6250 continue; 6251 6252 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; 6253 } 6254 6255 for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) { 6256 struct kvm_msr_entry msr; 6257 6258 msr.index = msr_based_features_all[i]; 6259 if (kvm_get_msr_feature(&msr)) 6260 continue; 6261 6262 msr_based_features[num_msr_based_features++] = msr_based_features_all[i]; 6263 } 6264 } 6265 6266 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, 6267 const void *v) 6268 { 6269 int handled = 0; 6270 int n; 6271 6272 do { 6273 n = min(len, 8); 6274 if (!(lapic_in_kernel(vcpu) && 6275 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) 6276 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) 6277 break; 6278 handled += n; 6279 addr += n; 6280 len -= n; 6281 v += n; 6282 } while (len); 6283 6284 return handled; 6285 } 6286 6287 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) 6288 { 6289 int handled = 0; 6290 int n; 6291 6292 do { 6293 n = min(len, 8); 6294 if (!(lapic_in_kernel(vcpu) && 6295 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, 6296 addr, n, v)) 6297 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) 6298 break; 6299 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); 6300 handled += n; 6301 addr += n; 6302 len -= n; 6303 v += n; 6304 } while (len); 6305 6306 return handled; 6307 } 6308 6309 static void kvm_set_segment(struct kvm_vcpu *vcpu, 6310 struct kvm_segment *var, int seg) 6311 { 6312 static_call(kvm_x86_set_segment)(vcpu, var, seg); 6313 } 6314 6315 void kvm_get_segment(struct kvm_vcpu *vcpu, 6316 struct kvm_segment *var, int seg) 6317 { 6318 static_call(kvm_x86_get_segment)(vcpu, var, seg); 6319 } 6320 6321 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, 6322 struct x86_exception *exception) 6323 { 6324 gpa_t t_gpa; 6325 6326 BUG_ON(!mmu_is_nested(vcpu)); 6327 6328 /* NPT walks are always user-walks */ 6329 access |= PFERR_USER_MASK; 6330 t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception); 6331 6332 return t_gpa; 6333 } 6334 6335 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 6336 struct x86_exception *exception) 6337 { 6338 u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 6339 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 6340 } 6341 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); 6342 6343 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, 6344 struct x86_exception *exception) 6345 { 6346 u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 6347 access |= PFERR_FETCH_MASK; 6348 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 6349 } 6350 6351 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 6352 struct x86_exception *exception) 6353 { 6354 u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? 
PFERR_USER_MASK : 0; 6355 access |= PFERR_WRITE_MASK; 6356 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 6357 } 6358 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); 6359 6360 /* uses this to access any guest's mapped memory without checking CPL */ 6361 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, 6362 struct x86_exception *exception) 6363 { 6364 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); 6365 } 6366 6367 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 6368 struct kvm_vcpu *vcpu, u32 access, 6369 struct x86_exception *exception) 6370 { 6371 void *data = val; 6372 int r = X86EMUL_CONTINUE; 6373 6374 while (bytes) { 6375 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, 6376 exception); 6377 unsigned offset = addr & (PAGE_SIZE-1); 6378 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 6379 int ret; 6380 6381 if (gpa == UNMAPPED_GVA) 6382 return X86EMUL_PROPAGATE_FAULT; 6383 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, 6384 offset, toread); 6385 if (ret < 0) { 6386 r = X86EMUL_IO_NEEDED; 6387 goto out; 6388 } 6389 6390 bytes -= toread; 6391 data += toread; 6392 addr += toread; 6393 } 6394 out: 6395 return r; 6396 } 6397 6398 /* used for instruction fetching */ 6399 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, 6400 gva_t addr, void *val, unsigned int bytes, 6401 struct x86_exception *exception) 6402 { 6403 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 6404 u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 6405 unsigned offset; 6406 int ret; 6407 6408 /* Inline kvm_read_guest_virt_helper for speed. */ 6409 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, 6410 exception); 6411 if (unlikely(gpa == UNMAPPED_GVA)) 6412 return X86EMUL_PROPAGATE_FAULT; 6413 6414 offset = addr & (PAGE_SIZE-1); 6415 if (WARN_ON(offset + bytes > PAGE_SIZE)) 6416 bytes = (unsigned)PAGE_SIZE - offset; 6417 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, 6418 offset, bytes); 6419 if (unlikely(ret < 0)) 6420 return X86EMUL_IO_NEEDED; 6421 6422 return X86EMUL_CONTINUE; 6423 } 6424 6425 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, 6426 gva_t addr, void *val, unsigned int bytes, 6427 struct x86_exception *exception) 6428 { 6429 u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 6430 6431 /* 6432 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED 6433 * is returned, but our callers are not ready for that and they blindly 6434 * call kvm_inject_page_fault. Ensure that they at least do not leak 6435 * uninitialized kernel stack memory into cr2 and error code. 
6436 */ 6437 memset(exception, 0, sizeof(*exception)); 6438 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 6439 exception); 6440 } 6441 EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 6442 6443 static int emulator_read_std(struct x86_emulate_ctxt *ctxt, 6444 gva_t addr, void *val, unsigned int bytes, 6445 struct x86_exception *exception, bool system) 6446 { 6447 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 6448 u32 access = 0; 6449 6450 if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) 6451 access |= PFERR_USER_MASK; 6452 6453 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); 6454 } 6455 6456 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, 6457 unsigned long addr, void *val, unsigned int bytes) 6458 { 6459 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 6460 int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes); 6461 6462 return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE; 6463 } 6464 6465 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 6466 struct kvm_vcpu *vcpu, u32 access, 6467 struct x86_exception *exception) 6468 { 6469 void *data = val; 6470 int r = X86EMUL_CONTINUE; 6471 6472 while (bytes) { 6473 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, 6474 access, 6475 exception); 6476 unsigned offset = addr & (PAGE_SIZE-1); 6477 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 6478 int ret; 6479 6480 if (gpa == UNMAPPED_GVA) 6481 return X86EMUL_PROPAGATE_FAULT; 6482 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); 6483 if (ret < 0) { 6484 r = X86EMUL_IO_NEEDED; 6485 goto out; 6486 } 6487 6488 bytes -= towrite; 6489 data += towrite; 6490 addr += towrite; 6491 } 6492 out: 6493 return r; 6494 } 6495 6496 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, 6497 unsigned int bytes, struct x86_exception *exception, 6498 bool system) 6499 { 6500 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 6501 u32 access = PFERR_WRITE_MASK; 6502 6503 if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) 6504 access |= PFERR_USER_MASK; 6505 6506 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 6507 access, exception); 6508 } 6509 6510 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, 6511 unsigned int bytes, struct x86_exception *exception) 6512 { 6513 /* kvm_write_guest_virt_system can pull in tons of pages. 
*/ 6514 vcpu->arch.l1tf_flush_l1d = true; 6515 6516 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 6517 PFERR_WRITE_MASK, exception); 6518 } 6519 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 6520 6521 int handle_ud(struct kvm_vcpu *vcpu) 6522 { 6523 static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX }; 6524 int emul_type = EMULTYPE_TRAP_UD; 6525 char sig[5]; /* ud2; .ascii "kvm" */ 6526 struct x86_exception e; 6527 6528 if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, NULL, 0))) 6529 return 1; 6530 6531 if (force_emulation_prefix && 6532 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), 6533 sig, sizeof(sig), &e) == 0 && 6534 memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) { 6535 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); 6536 emul_type = EMULTYPE_TRAP_UD_FORCED; 6537 } 6538 6539 return kvm_emulate_instruction(vcpu, emul_type); 6540 } 6541 EXPORT_SYMBOL_GPL(handle_ud); 6542 6543 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 6544 gpa_t gpa, bool write) 6545 { 6546 /* For APIC access vmexit */ 6547 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 6548 return 1; 6549 6550 if (vcpu_match_mmio_gpa(vcpu, gpa)) { 6551 trace_vcpu_match_mmio(gva, gpa, write, true); 6552 return 1; 6553 } 6554 6555 return 0; 6556 } 6557 6558 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 6559 gpa_t *gpa, struct x86_exception *exception, 6560 bool write) 6561 { 6562 u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) 6563 | (write ? PFERR_WRITE_MASK : 0); 6564 6565 /* 6566 * currently PKRU is only applied to ept enabled guest so 6567 * there is no pkey in EPT page table for L1 guest or EPT 6568 * shadow page table for L2 guest. 6569 */ 6570 if (vcpu_match_mmio_gva(vcpu, gva) 6571 && !permission_fault(vcpu, vcpu->arch.walk_mmu, 6572 vcpu->arch.mmio_access, 0, access)) { 6573 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | 6574 (gva & (PAGE_SIZE - 1)); 6575 trace_vcpu_match_mmio(gva, *gpa, write, false); 6576 return 1; 6577 } 6578 6579 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 6580 6581 if (*gpa == UNMAPPED_GVA) 6582 return -1; 6583 6584 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); 6585 } 6586 6587 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 6588 const void *val, int bytes) 6589 { 6590 int ret; 6591 6592 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); 6593 if (ret < 0) 6594 return 0; 6595 kvm_page_track_write(vcpu, gpa, val, bytes); 6596 return 1; 6597 } 6598 6599 struct read_write_emulator_ops { 6600 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, 6601 int bytes); 6602 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, 6603 void *val, int bytes); 6604 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 6605 int bytes, void *val); 6606 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 6607 void *val, int bytes); 6608 bool write; 6609 }; 6610 6611 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) 6612 { 6613 if (vcpu->mmio_read_completed) { 6614 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 6615 vcpu->mmio_fragments[0].gpa, val); 6616 vcpu->mmio_read_completed = 0; 6617 return 1; 6618 } 6619 6620 return 0; 6621 } 6622 6623 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 6624 void *val, int bytes) 6625 { 6626 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); 6627 } 6628 6629 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 6630 void *val, int bytes) 6631 { 6632 return 
emulator_write_phys(vcpu, gpa, val, bytes); 6633 } 6634 6635 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) 6636 { 6637 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val); 6638 return vcpu_mmio_write(vcpu, gpa, bytes, val); 6639 } 6640 6641 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 6642 void *val, int bytes) 6643 { 6644 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL); 6645 return X86EMUL_IO_NEEDED; 6646 } 6647 6648 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 6649 void *val, int bytes) 6650 { 6651 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 6652 6653 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 6654 return X86EMUL_CONTINUE; 6655 } 6656 6657 static const struct read_write_emulator_ops read_emultor = { 6658 .read_write_prepare = read_prepare, 6659 .read_write_emulate = read_emulate, 6660 .read_write_mmio = vcpu_mmio_read, 6661 .read_write_exit_mmio = read_exit_mmio, 6662 }; 6663 6664 static const struct read_write_emulator_ops write_emultor = { 6665 .read_write_emulate = write_emulate, 6666 .read_write_mmio = write_mmio, 6667 .read_write_exit_mmio = write_exit_mmio, 6668 .write = true, 6669 }; 6670 6671 static int emulator_read_write_onepage(unsigned long addr, void *val, 6672 unsigned int bytes, 6673 struct x86_exception *exception, 6674 struct kvm_vcpu *vcpu, 6675 const struct read_write_emulator_ops *ops) 6676 { 6677 gpa_t gpa; 6678 int handled, ret; 6679 bool write = ops->write; 6680 struct kvm_mmio_fragment *frag; 6681 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 6682 6683 /* 6684 * If the exit was due to a NPF we may already have a GPA. 6685 * If the GPA is present, use it to avoid the GVA to GPA table walk. 6686 * Note, this cannot be used on string operations since string 6687 * operation using rep will only have the initial GPA from the NPF 6688 * occurred. 6689 */ 6690 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && 6691 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { 6692 gpa = ctxt->gpa_val; 6693 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); 6694 } else { 6695 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); 6696 if (ret < 0) 6697 return X86EMUL_PROPAGATE_FAULT; 6698 } 6699 6700 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) 6701 return X86EMUL_CONTINUE; 6702 6703 /* 6704 * Is this MMIO handled locally? 6705 */ 6706 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); 6707 if (handled == bytes) 6708 return X86EMUL_CONTINUE; 6709 6710 gpa += handled; 6711 bytes -= handled; 6712 val += handled; 6713 6714 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); 6715 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 6716 frag->gpa = gpa; 6717 frag->data = val; 6718 frag->len = bytes; 6719 return X86EMUL_CONTINUE; 6720 } 6721 6722 static int emulator_read_write(struct x86_emulate_ctxt *ctxt, 6723 unsigned long addr, 6724 void *val, unsigned int bytes, 6725 struct x86_exception *exception, 6726 const struct read_write_emulator_ops *ops) 6727 { 6728 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 6729 gpa_t gpa; 6730 int rc; 6731 6732 if (ops->read_write_prepare && 6733 ops->read_write_prepare(vcpu, val, bytes)) 6734 return X86EMUL_CONTINUE; 6735 6736 vcpu->mmio_nr_fragments = 0; 6737 6738 /* Crossing a page boundary? 
*/ 6739 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 6740 int now; 6741 6742 now = -addr & ~PAGE_MASK; 6743 rc = emulator_read_write_onepage(addr, val, now, exception, 6744 vcpu, ops); 6745 6746 if (rc != X86EMUL_CONTINUE) 6747 return rc; 6748 addr += now; 6749 if (ctxt->mode != X86EMUL_MODE_PROT64) 6750 addr = (u32)addr; 6751 val += now; 6752 bytes -= now; 6753 } 6754 6755 rc = emulator_read_write_onepage(addr, val, bytes, exception, 6756 vcpu, ops); 6757 if (rc != X86EMUL_CONTINUE) 6758 return rc; 6759 6760 if (!vcpu->mmio_nr_fragments) 6761 return rc; 6762 6763 gpa = vcpu->mmio_fragments[0].gpa; 6764 6765 vcpu->mmio_needed = 1; 6766 vcpu->mmio_cur_fragment = 0; 6767 6768 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); 6769 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 6770 vcpu->run->exit_reason = KVM_EXIT_MMIO; 6771 vcpu->run->mmio.phys_addr = gpa; 6772 6773 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 6774 } 6775 6776 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 6777 unsigned long addr, 6778 void *val, 6779 unsigned int bytes, 6780 struct x86_exception *exception) 6781 { 6782 return emulator_read_write(ctxt, addr, val, bytes, 6783 exception, &read_emultor); 6784 } 6785 6786 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, 6787 unsigned long addr, 6788 const void *val, 6789 unsigned int bytes, 6790 struct x86_exception *exception) 6791 { 6792 return emulator_read_write(ctxt, addr, (void *)val, bytes, 6793 exception, &write_emultor); 6794 } 6795 6796 #define CMPXCHG_TYPE(t, ptr, old, new) \ 6797 (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old)) 6798 6799 #ifdef CONFIG_X86_64 6800 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new) 6801 #else 6802 # define CMPXCHG64(ptr, old, new) \ 6803 (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) 6804 #endif 6805 6806 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, 6807 unsigned long addr, 6808 const void *old, 6809 const void *new, 6810 unsigned int bytes, 6811 struct x86_exception *exception) 6812 { 6813 struct kvm_host_map map; 6814 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 6815 u64 page_line_mask; 6816 gpa_t gpa; 6817 char *kaddr; 6818 bool exchanged; 6819 6820 /* guests cmpxchg8b have to be emulated atomically */ 6821 if (bytes > 8 || (bytes & (bytes - 1))) 6822 goto emul_write; 6823 6824 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); 6825 6826 if (gpa == UNMAPPED_GVA || 6827 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 6828 goto emul_write; 6829 6830 /* 6831 * Emulate the atomic as a straight write to avoid #AC if SLD is 6832 * enabled in the host and the access splits a cache line. 
6833 */ 6834 if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) 6835 page_line_mask = ~(cache_line_size() - 1); 6836 else 6837 page_line_mask = PAGE_MASK; 6838 6839 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) 6840 goto emul_write; 6841 6842 if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map)) 6843 goto emul_write; 6844 6845 kaddr = map.hva + offset_in_page(gpa); 6846 6847 switch (bytes) { 6848 case 1: 6849 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new); 6850 break; 6851 case 2: 6852 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new); 6853 break; 6854 case 4: 6855 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new); 6856 break; 6857 case 8: 6858 exchanged = CMPXCHG64(kaddr, old, new); 6859 break; 6860 default: 6861 BUG(); 6862 } 6863 6864 kvm_vcpu_unmap(vcpu, &map, true); 6865 6866 if (!exchanged) 6867 return X86EMUL_CMPXCHG_FAILED; 6868 6869 kvm_page_track_write(vcpu, gpa, new, bytes); 6870 6871 return X86EMUL_CONTINUE; 6872 6873 emul_write: 6874 printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); 6875 6876 return emulator_write_emulated(ctxt, addr, new, bytes, exception); 6877 } 6878 6879 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) 6880 { 6881 int r = 0, i; 6882 6883 for (i = 0; i < vcpu->arch.pio.count; i++) { 6884 if (vcpu->arch.pio.in) 6885 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, 6886 vcpu->arch.pio.size, pd); 6887 else 6888 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, 6889 vcpu->arch.pio.port, vcpu->arch.pio.size, 6890 pd); 6891 if (r) 6892 break; 6893 pd += vcpu->arch.pio.size; 6894 } 6895 return r; 6896 } 6897 6898 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, 6899 unsigned short port, void *val, 6900 unsigned int count, bool in) 6901 { 6902 vcpu->arch.pio.port = port; 6903 vcpu->arch.pio.in = in; 6904 vcpu->arch.pio.count = count; 6905 vcpu->arch.pio.size = size; 6906 6907 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { 6908 vcpu->arch.pio.count = 0; 6909 return 1; 6910 } 6911 6912 vcpu->run->exit_reason = KVM_EXIT_IO; 6913 vcpu->run->io.direction = in ? 
KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 6914 vcpu->run->io.size = size; 6915 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 6916 vcpu->run->io.count = count; 6917 vcpu->run->io.port = port; 6918 6919 return 0; 6920 } 6921 6922 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, 6923 unsigned short port, void *val, unsigned int count) 6924 { 6925 int ret; 6926 6927 if (vcpu->arch.pio.count) 6928 goto data_avail; 6929 6930 memset(vcpu->arch.pio_data, 0, size * count); 6931 6932 ret = emulator_pio_in_out(vcpu, size, port, val, count, true); 6933 if (ret) { 6934 data_avail: 6935 memcpy(val, vcpu->arch.pio_data, size * count); 6936 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); 6937 vcpu->arch.pio.count = 0; 6938 return 1; 6939 } 6940 6941 return 0; 6942 } 6943 6944 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, 6945 int size, unsigned short port, void *val, 6946 unsigned int count) 6947 { 6948 return emulator_pio_in(emul_to_vcpu(ctxt), size, port, val, count); 6949 6950 } 6951 6952 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size, 6953 unsigned short port, const void *val, 6954 unsigned int count) 6955 { 6956 memcpy(vcpu->arch.pio_data, val, size * count); 6957 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); 6958 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); 6959 } 6960 6961 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, 6962 int size, unsigned short port, 6963 const void *val, unsigned int count) 6964 { 6965 return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count); 6966 } 6967 6968 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 6969 { 6970 return static_call(kvm_x86_get_segment_base)(vcpu, seg); 6971 } 6972 6973 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) 6974 { 6975 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); 6976 } 6977 6978 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) 6979 { 6980 if (!need_emulate_wbinvd(vcpu)) 6981 return X86EMUL_CONTINUE; 6982 6983 if (static_call(kvm_x86_has_wbinvd_exit)()) { 6984 int cpu = get_cpu(); 6985 6986 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 6987 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, 6988 wbinvd_ipi, NULL, 1); 6989 put_cpu(); 6990 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 6991 } else 6992 wbinvd(); 6993 return X86EMUL_CONTINUE; 6994 } 6995 6996 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) 6997 { 6998 kvm_emulate_wbinvd_noskip(vcpu); 6999 return kvm_skip_emulated_instruction(vcpu); 7000 } 7001 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); 7002 7003 7004 7005 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) 7006 { 7007 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); 7008 } 7009 7010 static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, 7011 unsigned long *dest) 7012 { 7013 kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); 7014 } 7015 7016 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, 7017 unsigned long value) 7018 { 7019 7020 return kvm_set_dr(emul_to_vcpu(ctxt), dr, value); 7021 } 7022 7023 static u64 mk_cr_64(u64 curr_cr, u32 new_val) 7024 { 7025 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; 7026 } 7027 7028 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) 7029 { 7030 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7031 unsigned long value; 7032 7033 switch (cr) { 7034 case 0: 7035 value = kvm_read_cr0(vcpu); 7036 break; 7037 case 2: 7038 value = vcpu->arch.cr2; 7039 
break; 7040 case 3: 7041 value = kvm_read_cr3(vcpu); 7042 break; 7043 case 4: 7044 value = kvm_read_cr4(vcpu); 7045 break; 7046 case 8: 7047 value = kvm_get_cr8(vcpu); 7048 break; 7049 default: 7050 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7051 return 0; 7052 } 7053 7054 return value; 7055 } 7056 7057 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) 7058 { 7059 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7060 int res = 0; 7061 7062 switch (cr) { 7063 case 0: 7064 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); 7065 break; 7066 case 2: 7067 vcpu->arch.cr2 = val; 7068 break; 7069 case 3: 7070 res = kvm_set_cr3(vcpu, val); 7071 break; 7072 case 4: 7073 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); 7074 break; 7075 case 8: 7076 res = kvm_set_cr8(vcpu, val); 7077 break; 7078 default: 7079 kvm_err("%s: unexpected cr %u\n", __func__, cr); 7080 res = -1; 7081 } 7082 7083 return res; 7084 } 7085 7086 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) 7087 { 7088 return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt)); 7089 } 7090 7091 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7092 { 7093 static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt); 7094 } 7095 7096 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7097 { 7098 static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt); 7099 } 7100 7101 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7102 { 7103 static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt); 7104 } 7105 7106 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 7107 { 7108 static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt); 7109 } 7110 7111 static unsigned long emulator_get_cached_segment_base( 7112 struct x86_emulate_ctxt *ctxt, int seg) 7113 { 7114 return get_segment_base(emul_to_vcpu(ctxt), seg); 7115 } 7116 7117 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, 7118 struct desc_struct *desc, u32 *base3, 7119 int seg) 7120 { 7121 struct kvm_segment var; 7122 7123 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); 7124 *selector = var.selector; 7125 7126 if (var.unusable) { 7127 memset(desc, 0, sizeof(*desc)); 7128 if (base3) 7129 *base3 = 0; 7130 return false; 7131 } 7132 7133 if (var.g) 7134 var.limit >>= 12; 7135 set_desc_limit(desc, var.limit); 7136 set_desc_base(desc, (unsigned long)var.base); 7137 #ifdef CONFIG_X86_64 7138 if (base3) 7139 *base3 = var.base >> 32; 7140 #endif 7141 desc->type = var.type; 7142 desc->s = var.s; 7143 desc->dpl = var.dpl; 7144 desc->p = var.present; 7145 desc->avl = var.avl; 7146 desc->l = var.l; 7147 desc->d = var.db; 7148 desc->g = var.g; 7149 7150 return true; 7151 } 7152 7153 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, 7154 struct desc_struct *desc, u32 base3, 7155 int seg) 7156 { 7157 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7158 struct kvm_segment var; 7159 7160 var.selector = selector; 7161 var.base = get_desc_base(desc); 7162 #ifdef CONFIG_X86_64 7163 var.base |= ((u64)base3) << 32; 7164 #endif 7165 var.limit = get_desc_limit(desc); 7166 if (desc->g) 7167 var.limit = (var.limit << 12) | 0xfff; 7168 var.type = desc->type; 7169 var.dpl = desc->dpl; 7170 var.db = desc->d; 7171 var.s = desc->s; 7172 var.l = desc->l; 7173 var.g = desc->g; 7174 var.avl = desc->avl; 7175 var.present = desc->p; 7176 var.unusable = !var.present; 7177 var.padding = 0; 7178 7179 kvm_set_segment(vcpu, &var, seg); 
7180 return; 7181 } 7182 7183 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, 7184 u32 msr_index, u64 *pdata) 7185 { 7186 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7187 int r; 7188 7189 r = kvm_get_msr(vcpu, msr_index, pdata); 7190 7191 if (r && kvm_get_msr_user_space(vcpu, msr_index, r)) { 7192 /* Bounce to user space */ 7193 return X86EMUL_IO_NEEDED; 7194 } 7195 7196 return r; 7197 } 7198 7199 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, 7200 u32 msr_index, u64 data) 7201 { 7202 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7203 int r; 7204 7205 r = kvm_set_msr(vcpu, msr_index, data); 7206 7207 if (r && kvm_set_msr_user_space(vcpu, msr_index, data, r)) { 7208 /* Bounce to user space */ 7209 return X86EMUL_IO_NEEDED; 7210 } 7211 7212 return r; 7213 } 7214 7215 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt) 7216 { 7217 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7218 7219 return vcpu->arch.smbase; 7220 } 7221 7222 static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase) 7223 { 7224 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7225 7226 vcpu->arch.smbase = smbase; 7227 } 7228 7229 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, 7230 u32 pmc) 7231 { 7232 return kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc); 7233 } 7234 7235 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, 7236 u32 pmc, u64 *pdata) 7237 { 7238 return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata); 7239 } 7240 7241 static void emulator_halt(struct x86_emulate_ctxt *ctxt) 7242 { 7243 emul_to_vcpu(ctxt)->arch.halt_request = 1; 7244 } 7245 7246 static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 7247 struct x86_instruction_info *info, 7248 enum x86_intercept_stage stage) 7249 { 7250 return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage, 7251 &ctxt->exception); 7252 } 7253 7254 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, 7255 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, 7256 bool exact_only) 7257 { 7258 return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only); 7259 } 7260 7261 static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt) 7262 { 7263 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM); 7264 } 7265 7266 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt) 7267 { 7268 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE); 7269 } 7270 7271 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) 7272 { 7273 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR); 7274 } 7275 7276 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) 7277 { 7278 return kvm_register_read_raw(emul_to_vcpu(ctxt), reg); 7279 } 7280 7281 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) 7282 { 7283 kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val); 7284 } 7285 7286 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) 7287 { 7288 static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked); 7289 } 7290 7291 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) 7292 { 7293 return emul_to_vcpu(ctxt)->arch.hflags; 7294 } 7295 7296 static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt) 7297 { 7298 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7299 7300 kvm_smm_changed(vcpu, false); 7301 } 7302 7303 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt, 7304 const char *smstate) 7305 { 7306 return 
static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate); 7307 } 7308 7309 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt) 7310 { 7311 kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt)); 7312 } 7313 7314 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr) 7315 { 7316 return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr); 7317 } 7318 7319 static const struct x86_emulate_ops emulate_ops = { 7320 .read_gpr = emulator_read_gpr, 7321 .write_gpr = emulator_write_gpr, 7322 .read_std = emulator_read_std, 7323 .write_std = emulator_write_std, 7324 .read_phys = kvm_read_guest_phys_system, 7325 .fetch = kvm_fetch_guest_virt, 7326 .read_emulated = emulator_read_emulated, 7327 .write_emulated = emulator_write_emulated, 7328 .cmpxchg_emulated = emulator_cmpxchg_emulated, 7329 .invlpg = emulator_invlpg, 7330 .pio_in_emulated = emulator_pio_in_emulated, 7331 .pio_out_emulated = emulator_pio_out_emulated, 7332 .get_segment = emulator_get_segment, 7333 .set_segment = emulator_set_segment, 7334 .get_cached_segment_base = emulator_get_cached_segment_base, 7335 .get_gdt = emulator_get_gdt, 7336 .get_idt = emulator_get_idt, 7337 .set_gdt = emulator_set_gdt, 7338 .set_idt = emulator_set_idt, 7339 .get_cr = emulator_get_cr, 7340 .set_cr = emulator_set_cr, 7341 .cpl = emulator_get_cpl, 7342 .get_dr = emulator_get_dr, 7343 .set_dr = emulator_set_dr, 7344 .get_smbase = emulator_get_smbase, 7345 .set_smbase = emulator_set_smbase, 7346 .set_msr = emulator_set_msr, 7347 .get_msr = emulator_get_msr, 7348 .check_pmc = emulator_check_pmc, 7349 .read_pmc = emulator_read_pmc, 7350 .halt = emulator_halt, 7351 .wbinvd = emulator_wbinvd, 7352 .fix_hypercall = emulator_fix_hypercall, 7353 .intercept = emulator_intercept, 7354 .get_cpuid = emulator_get_cpuid, 7355 .guest_has_long_mode = emulator_guest_has_long_mode, 7356 .guest_has_movbe = emulator_guest_has_movbe, 7357 .guest_has_fxsr = emulator_guest_has_fxsr, 7358 .set_nmi_mask = emulator_set_nmi_mask, 7359 .get_hflags = emulator_get_hflags, 7360 .exiting_smm = emulator_exiting_smm, 7361 .leave_smm = emulator_leave_smm, 7362 .triple_fault = emulator_triple_fault, 7363 .set_xcr = emulator_set_xcr, 7364 }; 7365 7366 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) 7367 { 7368 u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 7369 /* 7370 * an sti; sti; sequence only disable interrupts for the first 7371 * instruction. So, if the last instruction, be it emulated or 7372 * not, left the system with the INT_STI flag enabled, it 7373 * means that the last instruction is an sti. We should not 7374 * leave the flag on in this case. 
The same goes for mov ss 7375 */ 7376 if (int_shadow & mask) 7377 mask = 0; 7378 if (unlikely(int_shadow || mask)) { 7379 static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); 7380 if (!mask) 7381 kvm_make_request(KVM_REQ_EVENT, vcpu); 7382 } 7383 } 7384 7385 static bool inject_emulated_exception(struct kvm_vcpu *vcpu) 7386 { 7387 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 7388 if (ctxt->exception.vector == PF_VECTOR) 7389 return kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); 7390 7391 if (ctxt->exception.error_code_valid) 7392 kvm_queue_exception_e(vcpu, ctxt->exception.vector, 7393 ctxt->exception.error_code); 7394 else 7395 kvm_queue_exception(vcpu, ctxt->exception.vector); 7396 return false; 7397 } 7398 7399 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu) 7400 { 7401 struct x86_emulate_ctxt *ctxt; 7402 7403 ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT); 7404 if (!ctxt) { 7405 pr_err("kvm: failed to allocate vcpu's emulator\n"); 7406 return NULL; 7407 } 7408 7409 ctxt->vcpu = vcpu; 7410 ctxt->ops = &emulate_ops; 7411 vcpu->arch.emulate_ctxt = ctxt; 7412 7413 return ctxt; 7414 } 7415 7416 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) 7417 { 7418 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 7419 int cs_db, cs_l; 7420 7421 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); 7422 7423 ctxt->gpa_available = false; 7424 ctxt->eflags = kvm_get_rflags(vcpu); 7425 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; 7426 7427 ctxt->eip = kvm_rip_read(vcpu); 7428 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 7429 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : 7430 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : 7431 cs_db ? X86EMUL_MODE_PROT32 : 7432 X86EMUL_MODE_PROT16; 7433 BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); 7434 BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); 7435 BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); 7436 7437 ctxt->interruptibility = 0; 7438 ctxt->have_exception = false; 7439 ctxt->exception.vector = -1; 7440 ctxt->perm_ok = false; 7441 7442 init_decode_cache(ctxt); 7443 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 7444 } 7445 7446 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) 7447 { 7448 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 7449 int ret; 7450 7451 init_emulate_ctxt(vcpu); 7452 7453 ctxt->op_bytes = 2; 7454 ctxt->ad_bytes = 2; 7455 ctxt->_eip = ctxt->eip + inc_eip; 7456 ret = emulate_int_real(ctxt, irq); 7457 7458 if (ret != X86EMUL_CONTINUE) { 7459 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 7460 } else { 7461 ctxt->eip = ctxt->_eip; 7462 kvm_rip_write(vcpu, ctxt->eip); 7463 kvm_set_rflags(vcpu, ctxt->eflags); 7464 } 7465 } 7466 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); 7467 7468 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) 7469 { 7470 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 7471 u32 insn_size = ctxt->fetch.end - ctxt->fetch.data; 7472 struct kvm_run *run = vcpu->run; 7473 7474 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 7475 run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION; 7476 run->emulation_failure.ndata = 0; 7477 run->emulation_failure.flags = 0; 7478 7479 if (insn_size) { 7480 run->emulation_failure.ndata = 3; 7481 run->emulation_failure.flags |= 7482 KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES; 7483 run->emulation_failure.insn_size = insn_size; 7484 memset(run->emulation_failure.insn_bytes, 0x90, 
7485 sizeof(run->emulation_failure.insn_bytes)); 7486 memcpy(run->emulation_failure.insn_bytes, 7487 ctxt->fetch.data, insn_size); 7488 } 7489 } 7490 7491 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) 7492 { 7493 struct kvm *kvm = vcpu->kvm; 7494 7495 ++vcpu->stat.insn_emulation_fail; 7496 trace_kvm_emulate_insn_failed(vcpu); 7497 7498 if (emulation_type & EMULTYPE_VMWARE_GP) { 7499 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 7500 return 1; 7501 } 7502 7503 if (kvm->arch.exit_on_emulation_error || 7504 (emulation_type & EMULTYPE_SKIP)) { 7505 prepare_emulation_failure_exit(vcpu); 7506 return 0; 7507 } 7508 7509 kvm_queue_exception(vcpu, UD_VECTOR); 7510 7511 if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { 7512 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 7513 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 7514 vcpu->run->internal.ndata = 0; 7515 return 0; 7516 } 7517 7518 return 1; 7519 } 7520 7521 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 7522 bool write_fault_to_shadow_pgtable, 7523 int emulation_type) 7524 { 7525 gpa_t gpa = cr2_or_gpa; 7526 kvm_pfn_t pfn; 7527 7528 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 7529 return false; 7530 7531 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 7532 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 7533 return false; 7534 7535 if (!vcpu->arch.mmu->direct_map) { 7536 /* 7537 * Write permission should be allowed since only 7538 * write access needs to be emulated. 7539 */ 7540 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 7541 7542 /* 7543 * If the mapping is invalid in the guest, let the CPU retry 7544 * it to generate the fault. 7545 */ 7546 if (gpa == UNMAPPED_GVA) 7547 return true; 7548 } 7549 7550 /* 7551 * Do not retry the unhandleable instruction if it faults on the 7552 * readonly host memory, otherwise it will go into an infinite loop: 7553 * retry instruction -> write #PF -> emulation fail -> retry 7554 * instruction -> ... 7555 */ 7556 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); 7557 7558 /* 7559 * If the instruction failed on the error pfn, it cannot be fixed; 7560 * report the error to userspace. 7561 */ 7562 if (is_error_noslot_pfn(pfn)) 7563 return false; 7564 7565 kvm_release_pfn_clean(pfn); 7566 7567 /* The instructions are well-emulated on direct mmu. */ 7568 if (vcpu->arch.mmu->direct_map) { 7569 unsigned int indirect_shadow_pages; 7570 7571 write_lock(&vcpu->kvm->mmu_lock); 7572 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; 7573 write_unlock(&vcpu->kvm->mmu_lock); 7574 7575 if (indirect_shadow_pages) 7576 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 7577 7578 return true; 7579 } 7580 7581 /* 7582 * If emulation was due to an access to a shadowed page table 7583 * and it failed, try to unshadow the page and re-enter the 7584 * guest to let the CPU execute the instruction. 7585 */ 7586 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 7587 7588 /* 7589 * If the access faults on its page table, it cannot 7590 * be fixed by unprotecting the shadow page, and it should 7591 * be reported to userspace.
7592 */ 7593 return !write_fault_to_shadow_pgtable; 7594 } 7595 7596 static bool retry_instruction(struct x86_emulate_ctxt *ctxt, 7597 gpa_t cr2_or_gpa, int emulation_type) 7598 { 7599 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7600 unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa; 7601 7602 last_retry_eip = vcpu->arch.last_retry_eip; 7603 last_retry_addr = vcpu->arch.last_retry_addr; 7604 7605 /* 7606 * If the emulation is caused by #PF and it is non-page_table 7607 * writing instruction, it means the VM-EXIT is caused by shadow 7608 * page protected, we can zap the shadow page and retry this 7609 * instruction directly. 7610 * 7611 * Note: if the guest uses a non-page-table modifying instruction 7612 * on the PDE that points to the instruction, then we will unmap 7613 * the instruction and go to an infinite loop. So, we cache the 7614 * last retried eip and the last fault address, if we meet the eip 7615 * and the address again, we can break out of the potential infinite 7616 * loop. 7617 */ 7618 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 7619 7620 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 7621 return false; 7622 7623 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 7624 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 7625 return false; 7626 7627 if (x86_page_table_writing_insn(ctxt)) 7628 return false; 7629 7630 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) 7631 return false; 7632 7633 vcpu->arch.last_retry_eip = ctxt->eip; 7634 vcpu->arch.last_retry_addr = cr2_or_gpa; 7635 7636 if (!vcpu->arch.mmu->direct_map) 7637 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 7638 7639 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 7640 7641 return true; 7642 } 7643 7644 static int complete_emulated_mmio(struct kvm_vcpu *vcpu); 7645 static int complete_emulated_pio(struct kvm_vcpu *vcpu); 7646 7647 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm) 7648 { 7649 trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm); 7650 7651 if (entering_smm) { 7652 vcpu->arch.hflags |= HF_SMM_MASK; 7653 } else { 7654 vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK); 7655 7656 /* Process a latched INIT or SMI, if any. */ 7657 kvm_make_request(KVM_REQ_EVENT, vcpu); 7658 } 7659 7660 kvm_mmu_reset_context(vcpu); 7661 } 7662 7663 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, 7664 unsigned long *db) 7665 { 7666 u32 dr6 = 0; 7667 int i; 7668 u32 enable, rwlen; 7669 7670 enable = dr7; 7671 rwlen = dr7 >> 16; 7672 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) 7673 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) 7674 dr6 |= (1 << i); 7675 return dr6; 7676 } 7677 7678 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) 7679 { 7680 struct kvm_run *kvm_run = vcpu->run; 7681 7682 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 7683 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; 7684 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); 7685 kvm_run->debug.arch.exception = DB_VECTOR; 7686 kvm_run->exit_reason = KVM_EXIT_DEBUG; 7687 return 0; 7688 } 7689 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); 7690 return 1; 7691 } 7692 7693 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) 7694 { 7695 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 7696 int r; 7697 7698 r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); 7699 if (unlikely(!r)) 7700 return 0; 7701 7702 /* 7703 * rflags is the old, "raw" value of the flags. 
The new value has 7704 * not been saved yet. 7705 * 7706 * This is correct even for TF set by the guest, because "the 7707 * processor will not generate this exception after the instruction 7708 * that sets the TF flag". 7709 */ 7710 if (unlikely(rflags & X86_EFLAGS_TF)) 7711 r = kvm_vcpu_do_singlestep(vcpu); 7712 return r; 7713 } 7714 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); 7715 7716 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) 7717 { 7718 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && 7719 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { 7720 struct kvm_run *kvm_run = vcpu->run; 7721 unsigned long eip = kvm_get_linear_rip(vcpu); 7722 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 7723 vcpu->arch.guest_debug_dr7, 7724 vcpu->arch.eff_db); 7725 7726 if (dr6 != 0) { 7727 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; 7728 kvm_run->debug.arch.pc = eip; 7729 kvm_run->debug.arch.exception = DB_VECTOR; 7730 kvm_run->exit_reason = KVM_EXIT_DEBUG; 7731 *r = 0; 7732 return true; 7733 } 7734 } 7735 7736 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && 7737 !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { 7738 unsigned long eip = kvm_get_linear_rip(vcpu); 7739 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0, 7740 vcpu->arch.dr7, 7741 vcpu->arch.db); 7742 7743 if (dr6 != 0) { 7744 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); 7745 *r = 1; 7746 return true; 7747 } 7748 } 7749 7750 return false; 7751 } 7752 7753 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) 7754 { 7755 switch (ctxt->opcode_len) { 7756 case 1: 7757 switch (ctxt->b) { 7758 case 0xe4: /* IN */ 7759 case 0xe5: 7760 case 0xec: 7761 case 0xed: 7762 case 0xe6: /* OUT */ 7763 case 0xe7: 7764 case 0xee: 7765 case 0xef: 7766 case 0x6c: /* INS */ 7767 case 0x6d: 7768 case 0x6e: /* OUTS */ 7769 case 0x6f: 7770 return true; 7771 } 7772 break; 7773 case 2: 7774 switch (ctxt->b) { 7775 case 0x33: /* RDPMC */ 7776 return true; 7777 } 7778 break; 7779 } 7780 7781 return false; 7782 } 7783 7784 /* 7785 * Decode to be emulated instruction. Return EMULATION_OK if success. 7786 */ 7787 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, 7788 void *insn, int insn_len) 7789 { 7790 int r = EMULATION_OK; 7791 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 7792 7793 init_emulate_ctxt(vcpu); 7794 7795 /* 7796 * We will reenter on the same instruction since we do not set 7797 * complete_userspace_io. This does not handle watchpoints yet, 7798 * those would be handled in the emulate_ops. 7799 */ 7800 if (!(emulation_type & EMULTYPE_SKIP) && 7801 kvm_vcpu_check_breakpoint(vcpu, &r)) 7802 return r; 7803 7804 r = x86_decode_insn(ctxt, insn, insn_len, emulation_type); 7805 7806 trace_kvm_emulate_insn_start(vcpu); 7807 ++vcpu->stat.insn_emulation; 7808 7809 return r; 7810 } 7811 EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction); 7812 7813 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 7814 int emulation_type, void *insn, int insn_len) 7815 { 7816 int r; 7817 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 7818 bool writeback = true; 7819 bool write_fault_to_spt; 7820 7821 if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, insn, insn_len))) 7822 return 1; 7823 7824 vcpu->arch.l1tf_flush_l1d = true; 7825 7826 /* 7827 * Clear write_fault_to_shadow_pgtable here to ensure it is 7828 * never reused. 
7829 */ 7830 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; 7831 vcpu->arch.write_fault_to_shadow_pgtable = false; 7832 7833 if (!(emulation_type & EMULTYPE_NO_DECODE)) { 7834 kvm_clear_exception_queue(vcpu); 7835 7836 r = x86_decode_emulated_instruction(vcpu, emulation_type, 7837 insn, insn_len); 7838 if (r != EMULATION_OK) { 7839 if ((emulation_type & EMULTYPE_TRAP_UD) || 7840 (emulation_type & EMULTYPE_TRAP_UD_FORCED)) { 7841 kvm_queue_exception(vcpu, UD_VECTOR); 7842 return 1; 7843 } 7844 if (reexecute_instruction(vcpu, cr2_or_gpa, 7845 write_fault_to_spt, 7846 emulation_type)) 7847 return 1; 7848 if (ctxt->have_exception) { 7849 /* 7850 * #UD should result in just EMULATION_FAILED, and trap-like 7851 * exception should not be encountered during decode. 7852 */ 7853 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || 7854 exception_type(ctxt->exception.vector) == EXCPT_TRAP); 7855 inject_emulated_exception(vcpu); 7856 return 1; 7857 } 7858 return handle_emulation_failure(vcpu, emulation_type); 7859 } 7860 } 7861 7862 if ((emulation_type & EMULTYPE_VMWARE_GP) && 7863 !is_vmware_backdoor_opcode(ctxt)) { 7864 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 7865 return 1; 7866 } 7867 7868 /* 7869 * Note, EMULTYPE_SKIP is intended for use *only* by vendor callbacks 7870 * for kvm_skip_emulated_instruction(). The caller is responsible for 7871 * updating interruptibility state and injecting single-step #DBs. 7872 */ 7873 if (emulation_type & EMULTYPE_SKIP) { 7874 kvm_rip_write(vcpu, ctxt->_eip); 7875 if (ctxt->eflags & X86_EFLAGS_RF) 7876 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); 7877 return 1; 7878 } 7879 7880 if (retry_instruction(ctxt, cr2_or_gpa, emulation_type)) 7881 return 1; 7882 7883 /* this is needed for vmware backdoor interface to work since it 7884 changes registers values during IO operation */ 7885 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { 7886 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; 7887 emulator_invalidate_register_cache(ctxt); 7888 } 7889 7890 restart: 7891 if (emulation_type & EMULTYPE_PF) { 7892 /* Save the faulting GPA (cr2) in the address field */ 7893 ctxt->exception.address = cr2_or_gpa; 7894 7895 /* With shadow page tables, cr2 contains a GVA or nGPA. */ 7896 if (vcpu->arch.mmu->direct_map) { 7897 ctxt->gpa_available = true; 7898 ctxt->gpa_val = cr2_or_gpa; 7899 } 7900 } else { 7901 /* Sanitize the address out of an abundance of paranoia. */ 7902 ctxt->exception.address = 0; 7903 } 7904 7905 r = x86_emulate_insn(ctxt); 7906 7907 if (r == EMULATION_INTERCEPTED) 7908 return 1; 7909 7910 if (r == EMULATION_FAILED) { 7911 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, 7912 emulation_type)) 7913 return 1; 7914 7915 return handle_emulation_failure(vcpu, emulation_type); 7916 } 7917 7918 if (ctxt->have_exception) { 7919 r = 1; 7920 if (inject_emulated_exception(vcpu)) 7921 return r; 7922 } else if (vcpu->arch.pio.count) { 7923 if (!vcpu->arch.pio.in) { 7924 /* FIXME: return into emulator if single-stepping. 
*/ 7925 vcpu->arch.pio.count = 0; 7926 } else { 7927 writeback = false; 7928 vcpu->arch.complete_userspace_io = complete_emulated_pio; 7929 } 7930 r = 0; 7931 } else if (vcpu->mmio_needed) { 7932 ++vcpu->stat.mmio_exits; 7933 7934 if (!vcpu->mmio_is_write) 7935 writeback = false; 7936 r = 0; 7937 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 7938 } else if (r == EMULATION_RESTART) 7939 goto restart; 7940 else 7941 r = 1; 7942 7943 if (writeback) { 7944 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 7945 toggle_interruptibility(vcpu, ctxt->interruptibility); 7946 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 7947 if (!ctxt->have_exception || 7948 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { 7949 kvm_rip_write(vcpu, ctxt->eip); 7950 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) 7951 r = kvm_vcpu_do_singlestep(vcpu); 7952 if (kvm_x86_ops.update_emulated_instruction) 7953 static_call(kvm_x86_update_emulated_instruction)(vcpu); 7954 __kvm_set_rflags(vcpu, ctxt->eflags); 7955 } 7956 7957 /* 7958 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will 7959 * do nothing, and it will be requested again as soon as 7960 * the shadow expires. But we still need to check here, 7961 * because POPF has no interrupt shadow. 7962 */ 7963 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) 7964 kvm_make_request(KVM_REQ_EVENT, vcpu); 7965 } else 7966 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; 7967 7968 return r; 7969 } 7970 7971 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) 7972 { 7973 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); 7974 } 7975 EXPORT_SYMBOL_GPL(kvm_emulate_instruction); 7976 7977 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, 7978 void *insn, int insn_len) 7979 { 7980 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); 7981 } 7982 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); 7983 7984 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) 7985 { 7986 vcpu->arch.pio.count = 0; 7987 return 1; 7988 } 7989 7990 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) 7991 { 7992 vcpu->arch.pio.count = 0; 7993 7994 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) 7995 return 1; 7996 7997 return kvm_skip_emulated_instruction(vcpu); 7998 } 7999 8000 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, 8001 unsigned short port) 8002 { 8003 unsigned long val = kvm_rax_read(vcpu); 8004 int ret = emulator_pio_out(vcpu, size, port, &val, 1); 8005 8006 if (ret) 8007 return ret; 8008 8009 /* 8010 * Workaround userspace that relies on old KVM behavior of %rip being 8011 * incremented prior to exiting to userspace to handle "OUT 0x7e". 
8012 */ 8013 if (port == 0x7e && 8014 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { 8015 vcpu->arch.complete_userspace_io = 8016 complete_fast_pio_out_port_0x7e; 8017 kvm_skip_emulated_instruction(vcpu); 8018 } else { 8019 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 8020 vcpu->arch.complete_userspace_io = complete_fast_pio_out; 8021 } 8022 return 0; 8023 } 8024 8025 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) 8026 { 8027 unsigned long val; 8028 8029 /* We should only ever be called with arch.pio.count equal to 1 */ 8030 BUG_ON(vcpu->arch.pio.count != 1); 8031 8032 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { 8033 vcpu->arch.pio.count = 0; 8034 return 1; 8035 } 8036 8037 /* For size less than 4 we merge, else we zero extend */ 8038 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; 8039 8040 /* 8041 * Since vcpu->arch.pio.count == 1 let emulator_pio_in perform 8042 * the copy and tracing 8043 */ 8044 emulator_pio_in(vcpu, vcpu->arch.pio.size, vcpu->arch.pio.port, &val, 1); 8045 kvm_rax_write(vcpu, val); 8046 8047 return kvm_skip_emulated_instruction(vcpu); 8048 } 8049 8050 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, 8051 unsigned short port) 8052 { 8053 unsigned long val; 8054 int ret; 8055 8056 /* For size less than 4 we merge, else we zero extend */ 8057 val = (size < 4) ? kvm_rax_read(vcpu) : 0; 8058 8059 ret = emulator_pio_in(vcpu, size, port, &val, 1); 8060 if (ret) { 8061 kvm_rax_write(vcpu, val); 8062 return ret; 8063 } 8064 8065 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 8066 vcpu->arch.complete_userspace_io = complete_fast_pio_in; 8067 8068 return 0; 8069 } 8070 8071 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) 8072 { 8073 int ret; 8074 8075 if (in) 8076 ret = kvm_fast_pio_in(vcpu, size, port); 8077 else 8078 ret = kvm_fast_pio_out(vcpu, size, port); 8079 return ret && kvm_skip_emulated_instruction(vcpu); 8080 } 8081 EXPORT_SYMBOL_GPL(kvm_fast_pio); 8082 8083 static int kvmclock_cpu_down_prep(unsigned int cpu) 8084 { 8085 __this_cpu_write(cpu_tsc_khz, 0); 8086 return 0; 8087 } 8088 8089 static void tsc_khz_changed(void *data) 8090 { 8091 struct cpufreq_freqs *freq = data; 8092 unsigned long khz = 0; 8093 8094 if (data) 8095 khz = freq->new; 8096 else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 8097 khz = cpufreq_quick_get(raw_smp_processor_id()); 8098 if (!khz) 8099 khz = tsc_khz; 8100 __this_cpu_write(cpu_tsc_khz, khz); 8101 } 8102 8103 #ifdef CONFIG_X86_64 8104 static void kvm_hyperv_tsc_notifier(void) 8105 { 8106 struct kvm *kvm; 8107 struct kvm_vcpu *vcpu; 8108 int cpu; 8109 unsigned long flags; 8110 8111 mutex_lock(&kvm_lock); 8112 list_for_each_entry(kvm, &vm_list, vm_list) 8113 kvm_make_mclock_inprogress_request(kvm); 8114 8115 hyperv_stop_tsc_emulation(); 8116 8117 /* TSC frequency always matches when on Hyper-V */ 8118 for_each_present_cpu(cpu) 8119 per_cpu(cpu_tsc_khz, cpu) = tsc_khz; 8120 kvm_max_guest_tsc_khz = tsc_khz; 8121 8122 list_for_each_entry(kvm, &vm_list, vm_list) { 8123 struct kvm_arch *ka = &kvm->arch; 8124 8125 spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); 8126 pvclock_update_vm_gtod_copy(kvm); 8127 spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); 8128 8129 kvm_for_each_vcpu(cpu, vcpu, kvm) 8130 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 8131 8132 kvm_for_each_vcpu(cpu, vcpu, kvm) 8133 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); 8134 } 8135 mutex_unlock(&kvm_lock); 8136 } 8137 #endif 8138 8139 
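/*
 * Per-CPU leg of the cpufreq transition notifier below: refresh the
 * per-cpu cpu_tsc_khz value via an IPI to @cpu and flag every vCPU
 * currently resident on that CPU (KVM_REQ_CLOCK_UPDATE) so that it
 * recomputes its kvmclock parameters before re-entering the guest.
 */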
static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu) 8140 { 8141 struct kvm *kvm; 8142 struct kvm_vcpu *vcpu; 8143 int i, send_ipi = 0; 8144 8145 /*
8146 * We allow guests to temporarily run on slowing clocks, 8147 * provided we notify them after, or to run on accelerating 8148 * clocks, provided we notify them before. Thus time never 8149 * goes backwards. 8150 *
8151 * However, we have a problem. We can't atomically update 8152 * the frequency of a given CPU from this function; it is 8153 * merely a notifier, which can be called from any CPU. 8154 * Changing the TSC frequency at arbitrary points in time 8155 * requires a recomputation of local variables related to 8156 * the TSC for each VCPU. We must flag these local variables 8157 * to be updated and be sure the update takes place with the 8158 * new frequency before any guests proceed. 8159 *
8160 * Unfortunately, the combination of hotplug CPU and frequency 8161 * change creates an intractable locking scenario; the order 8162 * of when these callouts happen is undefined with respect to 8163 * CPU hotplug, and they can race with each other. As such, 8164 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is 8165 * undefined; you can actually have a CPU frequency change take 8166 * place in between the computation of X and the setting of the 8167 * variable. To protect against this problem, all updates of 8168 * the per_cpu tsc_khz variable are done in an interrupt 8169 * protected IPI, and all callers wishing to update the value 8170 * must wait for a synchronous IPI to complete (which is trivial 8171 * if the caller is on the CPU already). This establishes the 8172 * necessary total order on variable updates. 8173 *
8174 * Note that because a guest time update may take place 8175 * anytime after the setting of the VCPU's request bit, the 8176 * correct TSC value must be set before the request. However, 8177 * to ensure the update actually makes it to any guest which 8178 * starts running in hardware virtualization between the set 8179 * and the acquisition of the spinlock, we must also ping the 8180 * CPU after setting the request bit. 8181 * 8182 */ 8183
8184 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 8185 8186 mutex_lock(&kvm_lock); 8187 list_for_each_entry(kvm, &vm_list, vm_list) { 8188 kvm_for_each_vcpu(i, vcpu, kvm) { 8189 if (vcpu->cpu != cpu) 8190 continue; 8191 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 8192 if (vcpu->cpu != raw_smp_processor_id()) 8193 send_ipi = 1; 8194 } 8195 } 8196 mutex_unlock(&kvm_lock); 8197
8198 if (freq->old < freq->new && send_ipi) { 8199 /* 8200 * We upscale the frequency. We must make sure the guest 8201 * doesn't see old kvmclock values while running with 8202 * the new frequency, otherwise we risk the guest seeing 8203 * time go backwards. 8204 *
8205 * In case we update the frequency for another cpu 8206 * (which might be in guest context) send an interrupt 8207 * to kick the cpu out of guest context. Next time 8208 * guest context is entered kvmclock will be updated, 8209 * so the guest will not see stale values.
8210 */ 8211 smp_call_function_single(cpu, tsc_khz_changed, freq, 1); 8212 } 8213 } 8214 8215 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, 8216 void *data) 8217 { 8218 struct cpufreq_freqs *freq = data; 8219 int cpu; 8220 8221 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) 8222 return 0; 8223 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) 8224 return 0; 8225 8226 for_each_cpu(cpu, freq->policy->cpus) 8227 __kvmclock_cpufreq_notifier(freq, cpu); 8228 8229 return 0; 8230 } 8231 8232 static struct notifier_block kvmclock_cpufreq_notifier_block = { 8233 .notifier_call = kvmclock_cpufreq_notifier 8234 }; 8235 8236 static int kvmclock_cpu_online(unsigned int cpu) 8237 { 8238 tsc_khz_changed(NULL); 8239 return 0; 8240 } 8241 8242 static void kvm_timer_init(void) 8243 { 8244 max_tsc_khz = tsc_khz; 8245 8246 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 8247 #ifdef CONFIG_CPU_FREQ 8248 struct cpufreq_policy *policy; 8249 int cpu; 8250 8251 cpu = get_cpu(); 8252 policy = cpufreq_cpu_get(cpu); 8253 if (policy) { 8254 if (policy->cpuinfo.max_freq) 8255 max_tsc_khz = policy->cpuinfo.max_freq; 8256 cpufreq_cpu_put(policy); 8257 } 8258 put_cpu(); 8259 #endif 8260 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, 8261 CPUFREQ_TRANSITION_NOTIFIER); 8262 } 8263 8264 cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online", 8265 kvmclock_cpu_online, kvmclock_cpu_down_prep); 8266 } 8267 8268 DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); 8269 EXPORT_PER_CPU_SYMBOL_GPL(current_vcpu); 8270 8271 int kvm_is_in_guest(void) 8272 { 8273 return __this_cpu_read(current_vcpu) != NULL; 8274 } 8275 8276 static int kvm_is_user_mode(void) 8277 { 8278 int user_mode = 3; 8279 8280 if (__this_cpu_read(current_vcpu)) 8281 user_mode = static_call(kvm_x86_get_cpl)(__this_cpu_read(current_vcpu)); 8282 8283 return user_mode != 0; 8284 } 8285 8286 static unsigned long kvm_get_guest_ip(void) 8287 { 8288 unsigned long ip = 0; 8289 8290 if (__this_cpu_read(current_vcpu)) 8291 ip = kvm_rip_read(__this_cpu_read(current_vcpu)); 8292 8293 return ip; 8294 } 8295 8296 static void kvm_handle_intel_pt_intr(void) 8297 { 8298 struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu); 8299 8300 kvm_make_request(KVM_REQ_PMI, vcpu); 8301 __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT, 8302 (unsigned long *)&vcpu->arch.pmu.global_status); 8303 } 8304 8305 static struct perf_guest_info_callbacks kvm_guest_cbs = { 8306 .is_in_guest = kvm_is_in_guest, 8307 .is_user_mode = kvm_is_user_mode, 8308 .get_guest_ip = kvm_get_guest_ip, 8309 .handle_intel_pt_intr = kvm_handle_intel_pt_intr, 8310 }; 8311 8312 #ifdef CONFIG_X86_64 8313 static void pvclock_gtod_update_fn(struct work_struct *work) 8314 { 8315 struct kvm *kvm; 8316 8317 struct kvm_vcpu *vcpu; 8318 int i; 8319 8320 mutex_lock(&kvm_lock); 8321 list_for_each_entry(kvm, &vm_list, vm_list) 8322 kvm_for_each_vcpu(i, vcpu, kvm) 8323 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 8324 atomic_set(&kvm_guest_has_master_clock, 0); 8325 mutex_unlock(&kvm_lock); 8326 } 8327 8328 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); 8329 8330 /* 8331 * Indirection to move queue_work() out of the tk_core.seq write held 8332 * region to prevent possible deadlocks against time accessors which 8333 * are invoked with work related locks held. 
8334 */ 8335 static void pvclock_irq_work_fn(struct irq_work *w) 8336 { 8337 queue_work(system_long_wq, &pvclock_gtod_work); 8338 } 8339 8340 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn); 8341 8342 /* 8343 * Notification about pvclock gtod data update. 8344 */ 8345 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, 8346 void *priv) 8347 { 8348 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 8349 struct timekeeper *tk = priv; 8350 8351 update_pvclock_gtod(tk); 8352 8353 /* 8354 * Disable master clock if host does not trust, or does not use, 8355 * TSC based clocksource. Delegate queue_work() to irq_work as 8356 * this is invoked with tk_core.seq write held. 8357 */ 8358 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && 8359 atomic_read(&kvm_guest_has_master_clock) != 0) 8360 irq_work_queue(&pvclock_irq_work); 8361 return 0; 8362 } 8363 8364 static struct notifier_block pvclock_gtod_notifier = { 8365 .notifier_call = pvclock_gtod_notify, 8366 }; 8367 #endif 8368 8369 int kvm_arch_init(void *opaque) 8370 { 8371 struct kvm_x86_init_ops *ops = opaque; 8372 int r; 8373 8374 if (kvm_x86_ops.hardware_enable) { 8375 printk(KERN_ERR "kvm: already loaded the other module\n"); 8376 r = -EEXIST; 8377 goto out; 8378 } 8379 8380 if (!ops->cpu_has_kvm_support()) { 8381 pr_err_ratelimited("kvm: no hardware support\n"); 8382 r = -EOPNOTSUPP; 8383 goto out; 8384 } 8385 if (ops->disabled_by_bios()) { 8386 pr_err_ratelimited("kvm: disabled by bios\n"); 8387 r = -EOPNOTSUPP; 8388 goto out; 8389 } 8390 8391 /* 8392 * KVM explicitly assumes that the guest has an FPU and 8393 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the 8394 * vCPU's FPU state as a fxregs_state struct. 8395 */ 8396 if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) { 8397 printk(KERN_ERR "kvm: inadequate fpu\n"); 8398 r = -EOPNOTSUPP; 8399 goto out; 8400 } 8401 8402 r = -ENOMEM; 8403 x86_fpu_cache = kmem_cache_create("x86_fpu", sizeof(struct fpu), 8404 __alignof__(struct fpu), SLAB_ACCOUNT, 8405 NULL); 8406 if (!x86_fpu_cache) { 8407 printk(KERN_ERR "kvm: failed to allocate cache for x86 fpu\n"); 8408 goto out; 8409 } 8410 8411 x86_emulator_cache = kvm_alloc_emulator_cache(); 8412 if (!x86_emulator_cache) { 8413 pr_err("kvm: failed to allocate cache for x86 emulator\n"); 8414 goto out_free_x86_fpu_cache; 8415 } 8416 8417 user_return_msrs = alloc_percpu(struct kvm_user_return_msrs); 8418 if (!user_return_msrs) { 8419 printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n"); 8420 goto out_free_x86_emulator_cache; 8421 } 8422 kvm_nr_uret_msrs = 0; 8423 8424 r = kvm_mmu_module_init(); 8425 if (r) 8426 goto out_free_percpu; 8427 8428 kvm_timer_init(); 8429 8430 perf_register_guest_info_callbacks(&kvm_guest_cbs); 8431 8432 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 8433 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 8434 supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0; 8435 } 8436 8437 if (pi_inject_timer == -1) 8438 pi_inject_timer = housekeeping_enabled(HK_FLAG_TIMER); 8439 #ifdef CONFIG_X86_64 8440 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); 8441 8442 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 8443 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier); 8444 #endif 8445 8446 return 0; 8447 8448 out_free_percpu: 8449 free_percpu(user_return_msrs); 8450 out_free_x86_emulator_cache: 8451 kmem_cache_destroy(x86_emulator_cache); 8452 out_free_x86_fpu_cache: 8453 kmem_cache_destroy(x86_fpu_cache); 8454 out: 8455 return r; 8456 } 8457 8458 void 
kvm_arch_exit(void) 8459 { 8460 #ifdef CONFIG_X86_64 8461 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 8462 clear_hv_tscchange_cb(); 8463 #endif 8464 kvm_lapic_exit(); 8465 perf_unregister_guest_info_callbacks(&kvm_guest_cbs); 8466 8467 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 8468 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, 8469 CPUFREQ_TRANSITION_NOTIFIER); 8470 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); 8471 #ifdef CONFIG_X86_64 8472 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); 8473 irq_work_sync(&pvclock_irq_work); 8474 cancel_work_sync(&pvclock_gtod_work); 8475 #endif 8476 kvm_x86_ops.hardware_enable = NULL; 8477 kvm_mmu_module_exit(); 8478 free_percpu(user_return_msrs); 8479 kmem_cache_destroy(x86_emulator_cache); 8480 kmem_cache_destroy(x86_fpu_cache); 8481 #ifdef CONFIG_KVM_XEN 8482 static_key_deferred_flush(&kvm_xen_enabled); 8483 WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); 8484 #endif 8485 } 8486 8487 static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason) 8488 { 8489 ++vcpu->stat.halt_exits; 8490 if (lapic_in_kernel(vcpu)) { 8491 vcpu->arch.mp_state = state; 8492 return 1; 8493 } else { 8494 vcpu->run->exit_reason = reason; 8495 return 0; 8496 } 8497 } 8498 8499 int kvm_vcpu_halt(struct kvm_vcpu *vcpu) 8500 { 8501 return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); 8502 } 8503 EXPORT_SYMBOL_GPL(kvm_vcpu_halt); 8504 8505 int kvm_emulate_halt(struct kvm_vcpu *vcpu) 8506 { 8507 int ret = kvm_skip_emulated_instruction(vcpu); 8508 /* 8509 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered 8510 * KVM_EXIT_DEBUG here. 8511 */ 8512 return kvm_vcpu_halt(vcpu) && ret; 8513 } 8514 EXPORT_SYMBOL_GPL(kvm_emulate_halt); 8515 8516 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) 8517 { 8518 int ret = kvm_skip_emulated_instruction(vcpu); 8519 8520 return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, KVM_EXIT_AP_RESET_HOLD) && ret; 8521 } 8522 EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold); 8523 8524 #ifdef CONFIG_X86_64 8525 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, 8526 unsigned long clock_type) 8527 { 8528 struct kvm_clock_pairing clock_pairing; 8529 struct timespec64 ts; 8530 u64 cycle; 8531 int ret; 8532 8533 if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK) 8534 return -KVM_EOPNOTSUPP; 8535 8536 if (!kvm_get_walltime_and_clockread(&ts, &cycle)) 8537 return -KVM_EOPNOTSUPP; 8538 8539 clock_pairing.sec = ts.tv_sec; 8540 clock_pairing.nsec = ts.tv_nsec; 8541 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); 8542 clock_pairing.flags = 0; 8543 memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad)); 8544 8545 ret = 0; 8546 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, 8547 sizeof(struct kvm_clock_pairing))) 8548 ret = -KVM_EFAULT; 8549 8550 return ret; 8551 } 8552 #endif 8553 8554 /* 8555 * kvm_pv_kick_cpu_op: Kick a vcpu. 8556 * 8557 * @apicid - apicid of vcpu to be kicked. 
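 * @flags - hypercall flags (a0); not consumed by this helper.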
8558 */ 8559 static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) 8560 { 8561 struct kvm_lapic_irq lapic_irq; 8562 8563 lapic_irq.shorthand = APIC_DEST_NOSHORT; 8564 lapic_irq.dest_mode = APIC_DEST_PHYSICAL; 8565 lapic_irq.level = 0; 8566 lapic_irq.dest_id = apicid; 8567 lapic_irq.msi_redir_hint = false; 8568 8569 lapic_irq.delivery_mode = APIC_DM_REMRD; 8570 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); 8571 } 8572 8573 bool kvm_apicv_activated(struct kvm *kvm) 8574 { 8575 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); 8576 } 8577 EXPORT_SYMBOL_GPL(kvm_apicv_activated); 8578 8579 static void kvm_apicv_init(struct kvm *kvm) 8580 { 8581 if (enable_apicv) 8582 clear_bit(APICV_INHIBIT_REASON_DISABLE, 8583 &kvm->arch.apicv_inhibit_reasons); 8584 else 8585 set_bit(APICV_INHIBIT_REASON_DISABLE, 8586 &kvm->arch.apicv_inhibit_reasons); 8587 } 8588 8589 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) 8590 { 8591 struct kvm_vcpu *target = NULL; 8592 struct kvm_apic_map *map; 8593 8594 vcpu->stat.directed_yield_attempted++; 8595 8596 if (single_task_running()) 8597 goto no_yield; 8598 8599 rcu_read_lock(); 8600 map = rcu_dereference(vcpu->kvm->arch.apic_map); 8601 8602 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) 8603 target = map->phys_map[dest_id]->vcpu; 8604 8605 rcu_read_unlock(); 8606 8607 if (!target || !READ_ONCE(target->ready)) 8608 goto no_yield; 8609 8610 /* Ignore requests to yield to self */ 8611 if (vcpu == target) 8612 goto no_yield; 8613 8614 if (kvm_vcpu_yield_to(target) <= 0) 8615 goto no_yield; 8616 8617 vcpu->stat.directed_yield_successful++; 8618 8619 no_yield: 8620 return; 8621 } 8622 8623 static int complete_hypercall_exit(struct kvm_vcpu *vcpu) 8624 { 8625 u64 ret = vcpu->run->hypercall.ret; 8626 8627 if (!is_64_bit_mode(vcpu)) 8628 ret = (u32)ret; 8629 kvm_rax_write(vcpu, ret); 8630 ++vcpu->stat.hypercalls; 8631 return kvm_skip_emulated_instruction(vcpu); 8632 } 8633 8634 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 8635 { 8636 unsigned long nr, a0, a1, a2, a3, ret; 8637 int op_64_bit; 8638 8639 if (kvm_xen_hypercall_enabled(vcpu->kvm)) 8640 return kvm_xen_hypercall(vcpu); 8641 8642 if (kvm_hv_hypercall_enabled(vcpu)) 8643 return kvm_hv_hypercall(vcpu); 8644 8645 nr = kvm_rax_read(vcpu); 8646 a0 = kvm_rbx_read(vcpu); 8647 a1 = kvm_rcx_read(vcpu); 8648 a2 = kvm_rdx_read(vcpu); 8649 a3 = kvm_rsi_read(vcpu); 8650 8651 trace_kvm_hypercall(nr, a0, a1, a2, a3); 8652 8653 op_64_bit = is_64_bit_mode(vcpu); 8654 if (!op_64_bit) { 8655 nr &= 0xFFFFFFFF; 8656 a0 &= 0xFFFFFFFF; 8657 a1 &= 0xFFFFFFFF; 8658 a2 &= 0xFFFFFFFF; 8659 a3 &= 0xFFFFFFFF; 8660 } 8661 8662 if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { 8663 ret = -KVM_EPERM; 8664 goto out; 8665 } 8666 8667 ret = -KVM_ENOSYS; 8668 8669 switch (nr) { 8670 case KVM_HC_VAPIC_POLL_IRQ: 8671 ret = 0; 8672 break; 8673 case KVM_HC_KICK_CPU: 8674 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT)) 8675 break; 8676 8677 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); 8678 kvm_sched_yield(vcpu, a1); 8679 ret = 0; 8680 break; 8681 #ifdef CONFIG_X86_64 8682 case KVM_HC_CLOCK_PAIRING: 8683 ret = kvm_pv_clock_pairing(vcpu, a0, a1); 8684 break; 8685 #endif 8686 case KVM_HC_SEND_IPI: 8687 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI)) 8688 break; 8689 8690 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); 8691 break; 8692 case KVM_HC_SCHED_YIELD: 8693 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) 8694 break; 8695 8696 
kvm_sched_yield(vcpu, a0); 8697 ret = 0; 8698 break; 8699 case KVM_HC_MAP_GPA_RANGE: { 8700 u64 gpa = a0, npages = a1, attrs = a2; 8701 8702 ret = -KVM_ENOSYS; 8703 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) 8704 break; 8705 8706 if (!PAGE_ALIGNED(gpa) || !npages || 8707 gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) { 8708 ret = -KVM_EINVAL; 8709 break; 8710 } 8711 8712 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; 8713 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; 8714 vcpu->run->hypercall.args[0] = gpa; 8715 vcpu->run->hypercall.args[1] = npages; 8716 vcpu->run->hypercall.args[2] = attrs; 8717 vcpu->run->hypercall.longmode = op_64_bit; 8718 vcpu->arch.complete_userspace_io = complete_hypercall_exit; 8719 return 0; 8720 } 8721 default: 8722 ret = -KVM_ENOSYS; 8723 break; 8724 } 8725 out: 8726 if (!op_64_bit) 8727 ret = (u32)ret; 8728 kvm_rax_write(vcpu, ret); 8729 8730 ++vcpu->stat.hypercalls; 8731 return kvm_skip_emulated_instruction(vcpu); 8732 } 8733 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 8734 8735 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) 8736 { 8737 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8738 char instruction[3]; 8739 unsigned long rip = kvm_rip_read(vcpu); 8740 8741 static_call(kvm_x86_patch_hypercall)(vcpu, instruction); 8742 8743 return emulator_write_emulated(ctxt, rip, instruction, 3, 8744 &ctxt->exception); 8745 } 8746 8747 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 8748 { 8749 return vcpu->run->request_interrupt_window && 8750 likely(!pic_in_kernel(vcpu->kvm)); 8751 } 8752 8753 static void post_kvm_run_save(struct kvm_vcpu *vcpu) 8754 { 8755 struct kvm_run *kvm_run = vcpu->run; 8756 8757 /* 8758 * if_flag is obsolete and useless, so do not bother 8759 * setting it for SEV-ES guests. Userspace can just 8760 * use kvm_run->ready_for_interrupt_injection. 
8761 */ 8762 kvm_run->if_flag = !vcpu->arch.guest_state_protected 8763 && (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; 8764 8765 kvm_run->cr8 = kvm_get_cr8(vcpu); 8766 kvm_run->apic_base = kvm_get_apic_base(vcpu); 8767 kvm_run->ready_for_interrupt_injection = 8768 pic_in_kernel(vcpu->kvm) || 8769 kvm_vcpu_ready_for_interrupt_injection(vcpu); 8770 8771 if (is_smm(vcpu)) 8772 kvm_run->flags |= KVM_RUN_X86_SMM; 8773 } 8774 8775 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 8776 { 8777 int max_irr, tpr; 8778 8779 if (!kvm_x86_ops.update_cr8_intercept) 8780 return; 8781 8782 if (!lapic_in_kernel(vcpu)) 8783 return; 8784 8785 if (vcpu->arch.apicv_active) 8786 return; 8787 8788 if (!vcpu->arch.apic->vapic_addr) 8789 max_irr = kvm_lapic_find_highest_irr(vcpu); 8790 else 8791 max_irr = -1; 8792 8793 if (max_irr != -1) 8794 max_irr >>= 4; 8795 8796 tpr = kvm_lapic_get_cr8(vcpu); 8797 8798 static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); 8799 } 8800 8801 8802 int kvm_check_nested_events(struct kvm_vcpu *vcpu) 8803 { 8804 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 8805 kvm_x86_ops.nested_ops->triple_fault(vcpu); 8806 return 1; 8807 } 8808 8809 return kvm_x86_ops.nested_ops->check_events(vcpu); 8810 } 8811 8812 static void kvm_inject_exception(struct kvm_vcpu *vcpu) 8813 { 8814 if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) 8815 vcpu->arch.exception.error_code = false; 8816 static_call(kvm_x86_queue_exception)(vcpu); 8817 } 8818 8819 static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) 8820 { 8821 int r; 8822 bool can_inject = true; 8823 8824 /* try to reinject previous events if any */ 8825 8826 if (vcpu->arch.exception.injected) { 8827 kvm_inject_exception(vcpu); 8828 can_inject = false; 8829 } 8830 /* 8831 * Do not inject an NMI or interrupt if there is a pending 8832 * exception. Exceptions and interrupts are recognized at 8833 * instruction boundaries, i.e. the start of an instruction. 8834 * Trap-like exceptions, e.g. #DB, have higher priority than 8835 * NMIs and interrupts, i.e. traps are recognized before an 8836 * NMI/interrupt that's pending on the same instruction. 8837 * Fault-like exceptions, e.g. #GP and #PF, are the lowest 8838 * priority, but are only generated (pended) during instruction 8839 * execution, i.e. a pending fault-like exception means the 8840 * fault occurred on the *previous* instruction and must be 8841 * serviced prior to recognizing any new events in order to 8842 * fully complete the previous instruction. 8843 */ 8844 else if (!vcpu->arch.exception.pending) { 8845 if (vcpu->arch.nmi_injected) { 8846 static_call(kvm_x86_set_nmi)(vcpu); 8847 can_inject = false; 8848 } else if (vcpu->arch.interrupt.injected) { 8849 static_call(kvm_x86_set_irq)(vcpu); 8850 can_inject = false; 8851 } 8852 } 8853 8854 WARN_ON_ONCE(vcpu->arch.exception.injected && 8855 vcpu->arch.exception.pending); 8856 8857 /* 8858 * Call check_nested_events() even if we reinjected a previous event 8859 * in order for caller to determine if it should require immediate-exit 8860 * from L2 to L1 due to pending L1 events which require exit 8861 * from L2 to L1. 
8862 */ 8863 if (is_guest_mode(vcpu)) { 8864 r = kvm_check_nested_events(vcpu); 8865 if (r < 0) 8866 goto out; 8867 } 8868 8869 /* try to inject new event if pending */ 8870 if (vcpu->arch.exception.pending) { 8871 trace_kvm_inj_exception(vcpu->arch.exception.nr, 8872 vcpu->arch.exception.has_error_code, 8873 vcpu->arch.exception.error_code); 8874 8875 vcpu->arch.exception.pending = false; 8876 vcpu->arch.exception.injected = true; 8877 8878 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) 8879 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | 8880 X86_EFLAGS_RF); 8881 8882 if (vcpu->arch.exception.nr == DB_VECTOR) { 8883 kvm_deliver_exception_payload(vcpu); 8884 if (vcpu->arch.dr7 & DR7_GD) { 8885 vcpu->arch.dr7 &= ~DR7_GD; 8886 kvm_update_dr7(vcpu); 8887 } 8888 } 8889 8890 kvm_inject_exception(vcpu); 8891 can_inject = false; 8892 } 8893 8894 /* 8895 * Finally, inject interrupt events. If an event cannot be injected 8896 * due to architectural conditions (e.g. IF=0) a window-open exit 8897 * will re-request KVM_REQ_EVENT. Sometimes however an event is pending 8898 * and can architecturally be injected, but we cannot do it right now: 8899 * an interrupt could have arrived just now and we have to inject it 8900 * as a vmexit, or there could already an event in the queue, which is 8901 * indicated by can_inject. In that case we request an immediate exit 8902 * in order to make progress and get back here for another iteration. 8903 * The kvm_x86_ops hooks communicate this by returning -EBUSY. 8904 */ 8905 if (vcpu->arch.smi_pending) { 8906 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; 8907 if (r < 0) 8908 goto out; 8909 if (r) { 8910 vcpu->arch.smi_pending = false; 8911 ++vcpu->arch.smi_count; 8912 enter_smm(vcpu); 8913 can_inject = false; 8914 } else 8915 static_call(kvm_x86_enable_smi_window)(vcpu); 8916 } 8917 8918 if (vcpu->arch.nmi_pending) { 8919 r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; 8920 if (r < 0) 8921 goto out; 8922 if (r) { 8923 --vcpu->arch.nmi_pending; 8924 vcpu->arch.nmi_injected = true; 8925 static_call(kvm_x86_set_nmi)(vcpu); 8926 can_inject = false; 8927 WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); 8928 } 8929 if (vcpu->arch.nmi_pending) 8930 static_call(kvm_x86_enable_nmi_window)(vcpu); 8931 } 8932 8933 if (kvm_cpu_has_injectable_intr(vcpu)) { 8934 r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; 8935 if (r < 0) 8936 goto out; 8937 if (r) { 8938 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); 8939 static_call(kvm_x86_set_irq)(vcpu); 8940 WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); 8941 } 8942 if (kvm_cpu_has_injectable_intr(vcpu)) 8943 static_call(kvm_x86_enable_irq_window)(vcpu); 8944 } 8945 8946 if (is_guest_mode(vcpu) && 8947 kvm_x86_ops.nested_ops->hv_timer_pending && 8948 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) 8949 *req_immediate_exit = true; 8950 8951 WARN_ON(vcpu->arch.exception.pending); 8952 return 0; 8953 8954 out: 8955 if (r == -EBUSY) { 8956 *req_immediate_exit = true; 8957 r = 0; 8958 } 8959 return r; 8960 } 8961 8962 static void process_nmi(struct kvm_vcpu *vcpu) 8963 { 8964 unsigned limit = 2; 8965 8966 /* 8967 * x86 is limited to one NMI running, and one NMI pending after it. 8968 * If an NMI is already in progress, limit further NMIs to just one. 8969 * Otherwise, allow two (and we'll inject the first one immediately). 
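 *
 * For example: if the vCPU is already handling an NMI (so limit == 1)
 * and three more NMIs have been queued in nmi_queued, the atomic_xchg()
 * below raises nmi_pending to 3 and the min() immediately collapses it
 * back to 1; the surplus NMIs are dropped, in line with the
 * architectural limit described above.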
8970 */ 8971 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) 8972 limit = 1; 8973 8974 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); 8975 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); 8976 kvm_make_request(KVM_REQ_EVENT, vcpu); 8977 } 8978 8979 static u32 enter_smm_get_segment_flags(struct kvm_segment *seg) 8980 { 8981 u32 flags = 0; 8982 flags |= seg->g << 23; 8983 flags |= seg->db << 22; 8984 flags |= seg->l << 21; 8985 flags |= seg->avl << 20; 8986 flags |= seg->present << 15; 8987 flags |= seg->dpl << 13; 8988 flags |= seg->s << 12; 8989 flags |= seg->type << 8; 8990 return flags; 8991 } 8992 8993 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n) 8994 { 8995 struct kvm_segment seg; 8996 int offset; 8997 8998 kvm_get_segment(vcpu, &seg, n); 8999 put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector); 9000 9001 if (n < 3) 9002 offset = 0x7f84 + n * 12; 9003 else 9004 offset = 0x7f2c + (n - 3) * 12; 9005 9006 put_smstate(u32, buf, offset + 8, seg.base); 9007 put_smstate(u32, buf, offset + 4, seg.limit); 9008 put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg)); 9009 } 9010 9011 #ifdef CONFIG_X86_64 9012 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n) 9013 { 9014 struct kvm_segment seg; 9015 int offset; 9016 u16 flags; 9017 9018 kvm_get_segment(vcpu, &seg, n); 9019 offset = 0x7e00 + n * 16; 9020 9021 flags = enter_smm_get_segment_flags(&seg) >> 8; 9022 put_smstate(u16, buf, offset, seg.selector); 9023 put_smstate(u16, buf, offset + 2, flags); 9024 put_smstate(u32, buf, offset + 4, seg.limit); 9025 put_smstate(u64, buf, offset + 8, seg.base); 9026 } 9027 #endif 9028 9029 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) 9030 { 9031 struct desc_ptr dt; 9032 struct kvm_segment seg; 9033 unsigned long val; 9034 int i; 9035 9036 put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu)); 9037 put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu)); 9038 put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu)); 9039 put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu)); 9040 9041 for (i = 0; i < 8; i++) 9042 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i)); 9043 9044 kvm_get_dr(vcpu, 6, &val); 9045 put_smstate(u32, buf, 0x7fcc, (u32)val); 9046 kvm_get_dr(vcpu, 7, &val); 9047 put_smstate(u32, buf, 0x7fc8, (u32)val); 9048 9049 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); 9050 put_smstate(u32, buf, 0x7fc4, seg.selector); 9051 put_smstate(u32, buf, 0x7f64, seg.base); 9052 put_smstate(u32, buf, 0x7f60, seg.limit); 9053 put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg)); 9054 9055 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); 9056 put_smstate(u32, buf, 0x7fc0, seg.selector); 9057 put_smstate(u32, buf, 0x7f80, seg.base); 9058 put_smstate(u32, buf, 0x7f7c, seg.limit); 9059 put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg)); 9060 9061 static_call(kvm_x86_get_gdt)(vcpu, &dt); 9062 put_smstate(u32, buf, 0x7f74, dt.address); 9063 put_smstate(u32, buf, 0x7f70, dt.size); 9064 9065 static_call(kvm_x86_get_idt)(vcpu, &dt); 9066 put_smstate(u32, buf, 0x7f58, dt.address); 9067 put_smstate(u32, buf, 0x7f54, dt.size); 9068 9069 for (i = 0; i < 6; i++) 9070 enter_smm_save_seg_32(vcpu, buf, i); 9071 9072 put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu)); 9073 9074 /* revision id */ 9075 put_smstate(u32, buf, 0x7efc, 0x00020000); 9076 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); 9077 } 9078 9079 #ifdef CONFIG_X86_64 9080 static void 
enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) 9081 { 9082 struct desc_ptr dt; 9083 struct kvm_segment seg; 9084 unsigned long val; 9085 int i; 9086 9087 for (i = 0; i < 16; i++) 9088 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i)); 9089 9090 put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu)); 9091 put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu)); 9092 9093 kvm_get_dr(vcpu, 6, &val); 9094 put_smstate(u64, buf, 0x7f68, val); 9095 kvm_get_dr(vcpu, 7, &val); 9096 put_smstate(u64, buf, 0x7f60, val); 9097 9098 put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu)); 9099 put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu)); 9100 put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu)); 9101 9102 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); 9103 9104 /* revision id */ 9105 put_smstate(u32, buf, 0x7efc, 0x00020064); 9106 9107 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); 9108 9109 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); 9110 put_smstate(u16, buf, 0x7e90, seg.selector); 9111 put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8); 9112 put_smstate(u32, buf, 0x7e94, seg.limit); 9113 put_smstate(u64, buf, 0x7e98, seg.base); 9114 9115 static_call(kvm_x86_get_idt)(vcpu, &dt); 9116 put_smstate(u32, buf, 0x7e84, dt.size); 9117 put_smstate(u64, buf, 0x7e88, dt.address); 9118 9119 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); 9120 put_smstate(u16, buf, 0x7e70, seg.selector); 9121 put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8); 9122 put_smstate(u32, buf, 0x7e74, seg.limit); 9123 put_smstate(u64, buf, 0x7e78, seg.base); 9124 9125 static_call(kvm_x86_get_gdt)(vcpu, &dt); 9126 put_smstate(u32, buf, 0x7e64, dt.size); 9127 put_smstate(u64, buf, 0x7e68, dt.address); 9128 9129 for (i = 0; i < 6; i++) 9130 enter_smm_save_seg_64(vcpu, buf, i); 9131 } 9132 #endif 9133 9134 static void enter_smm(struct kvm_vcpu *vcpu) 9135 { 9136 struct kvm_segment cs, ds; 9137 struct desc_ptr dt; 9138 unsigned long cr0; 9139 char buf[512]; 9140 9141 memset(buf, 0, 512); 9142 #ifdef CONFIG_X86_64 9143 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) 9144 enter_smm_save_state_64(vcpu, buf); 9145 else 9146 #endif 9147 enter_smm_save_state_32(vcpu, buf); 9148 9149 /* 9150 * Give enter_smm() a chance to make ISA-specific changes to the vCPU 9151 * state (e.g. leave guest mode) after we've saved the state into the 9152 * SMM state-save area. 9153 */ 9154 static_call(kvm_x86_enter_smm)(vcpu, buf); 9155 9156 kvm_smm_changed(vcpu, true); 9157 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); 9158 9159 if (static_call(kvm_x86_get_nmi_mask)(vcpu)) 9160 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 9161 else 9162 static_call(kvm_x86_set_nmi_mask)(vcpu, true); 9163 9164 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 9165 kvm_rip_write(vcpu, 0x8000); 9166 9167 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); 9168 static_call(kvm_x86_set_cr0)(vcpu, cr0); 9169 vcpu->arch.cr0 = cr0; 9170 9171 static_call(kvm_x86_set_cr4)(vcpu, 0); 9172 9173 /* Undocumented: IDT limit is set to zero on entry to SMM. 
*/ 9174 dt.address = dt.size = 0; 9175 static_call(kvm_x86_set_idt)(vcpu, &dt); 9176 9177 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 9178 9179 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; 9180 cs.base = vcpu->arch.smbase; 9181 9182 ds.selector = 0; 9183 ds.base = 0; 9184 9185 cs.limit = ds.limit = 0xffffffff; 9186 cs.type = ds.type = 0x3; 9187 cs.dpl = ds.dpl = 0; 9188 cs.db = ds.db = 0; 9189 cs.s = ds.s = 1; 9190 cs.l = ds.l = 0; 9191 cs.g = ds.g = 1; 9192 cs.avl = ds.avl = 0; 9193 cs.present = ds.present = 1; 9194 cs.unusable = ds.unusable = 0; 9195 cs.padding = ds.padding = 0; 9196 9197 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 9198 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); 9199 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); 9200 kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); 9201 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); 9202 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); 9203 9204 #ifdef CONFIG_X86_64 9205 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) 9206 static_call(kvm_x86_set_efer)(vcpu, 0); 9207 #endif 9208 9209 kvm_update_cpuid_runtime(vcpu); 9210 kvm_mmu_reset_context(vcpu); 9211 } 9212 9213 static void process_smi(struct kvm_vcpu *vcpu) 9214 { 9215 vcpu->arch.smi_pending = true; 9216 kvm_make_request(KVM_REQ_EVENT, vcpu); 9217 } 9218 9219 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, 9220 unsigned long *vcpu_bitmap) 9221 { 9222 cpumask_var_t cpus; 9223 9224 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 9225 9226 kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, 9227 NULL, vcpu_bitmap, cpus); 9228 9229 free_cpumask_var(cpus); 9230 } 9231 9232 void kvm_make_scan_ioapic_request(struct kvm *kvm) 9233 { 9234 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); 9235 } 9236 9237 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) 9238 { 9239 if (!lapic_in_kernel(vcpu)) 9240 return; 9241 9242 vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm); 9243 kvm_apic_update_apicv(vcpu); 9244 static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); 9245 9246 /* 9247 * When APICv gets disabled, we may still have injected interrupts 9248 * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was 9249 * still active when the interrupt got accepted. Make sure 9250 * inject_pending_event() is called to check for that. 9251 */ 9252 if (!vcpu->arch.apicv_active) 9253 kvm_make_request(KVM_REQ_EVENT, vcpu); 9254 } 9255 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); 9256 9257 /* 9258 * NOTE: Do not hold any lock prior to calling this. 9259 * 9260 * In particular, kvm_request_apicv_update() expects kvm->srcu not to be 9261 * locked, because it calls __x86_set_memory_region() which does 9262 * synchronize_srcu(&kvm->srcu). 
9263 */ 9264 void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit) 9265 { 9266 struct kvm_vcpu *except; 9267 unsigned long old, new, expected; 9268
9269 if (!kvm_x86_ops.check_apicv_inhibit_reasons || 9270 !static_call(kvm_x86_check_apicv_inhibit_reasons)(bit)) 9271 return; 9272
9273 old = READ_ONCE(kvm->arch.apicv_inhibit_reasons); 9274 do { 9275 expected = new = old; 9276 if (activate) 9277 __clear_bit(bit, &new); 9278 else 9279 __set_bit(bit, &new); 9280 if (new == old) 9281 break; 9282 old = cmpxchg(&kvm->arch.apicv_inhibit_reasons, expected, new); 9283 } while (old != expected); 9284
9285 if (!!old == !!new) 9286 return; 9287
9288 trace_kvm_apicv_update_request(activate, bit); 9289 if (kvm_x86_ops.pre_update_apicv_exec_ctrl) 9290 static_call(kvm_x86_pre_update_apicv_exec_ctrl)(kvm, activate); 9291
9292 /* 9293 * Send a request to update APICv to all other vCPUs, while 9294 * updating the calling vCPU immediately instead of 9295 * waiting for another #VMEXIT to handle the request. 9296 */ 9297 except = kvm_get_running_vcpu(); 9298 kvm_make_all_cpus_request_except(kvm, KVM_REQ_APICV_UPDATE, 9299 except); 9300 if (except) 9301 kvm_vcpu_update_apicv(except); 9302 } 9303 EXPORT_SYMBOL_GPL(kvm_request_apicv_update); 9304
9305 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) 9306 { 9307 if (!kvm_apic_present(vcpu)) 9308 return; 9309
9310 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); 9311
9312 if (irqchip_split(vcpu->kvm)) 9313 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); 9314 else { 9315 if (vcpu->arch.apicv_active) 9316 static_call(kvm_x86_sync_pir_to_irr)(vcpu); 9317 if (ioapic_in_kernel(vcpu->kvm)) 9318 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); 9319 } 9320
9321 if (is_guest_mode(vcpu)) 9322 vcpu->arch.load_eoi_exitmap_pending = true; 9323 else 9324 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); 9325 } 9326
9327 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) 9328 { 9329 u64 eoi_exit_bitmap[4]; 9330
9331 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 9332 return; 9333
9334 if (to_hv_vcpu(vcpu)) 9335 bitmap_or((ulong *)eoi_exit_bitmap, 9336 vcpu->arch.ioapic_handled_vectors, 9337 to_hv_synic(vcpu)->vec_bitmap, 256); 9338
9339 static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); 9340 } 9341
9342 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, 9343 unsigned long start, unsigned long end) 9344 { 9345 unsigned long apic_address; 9346
9347 /* 9348 * The physical address of the APIC access page is stored in the VMCS. 9349 * Update it when it becomes invalid. 9350 */ 9351 apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 9352 if (start <= apic_address && apic_address < end) 9353 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); 9354 } 9355
9356 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) 9357 { 9358 if (!lapic_in_kernel(vcpu)) 9359 return; 9360
9361 if (!kvm_x86_ops.set_apic_access_page_addr) 9362 return; 9363
9364 static_call(kvm_x86_set_apic_access_page_addr)(vcpu); 9365 } 9366
9367 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) 9368 { 9369 smp_send_reschedule(vcpu->cpu); 9370 } 9371 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); 9372
9373 /* 9374 * Returns 1 to let vcpu_run() continue the guest execution loop without 9375 * exiting to userspace. Otherwise, the value will be returned to 9376 * userspace.
9377 */ 9378 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) 9379 { 9380 int r; 9381 bool req_int_win = 9382 dm_request_for_irq_injection(vcpu) && 9383 kvm_cpu_accept_dm_intr(vcpu); 9384 fastpath_t exit_fastpath; 9385 9386 bool req_immediate_exit = false; 9387 9388 /* Forbid vmenter if vcpu dirty ring is soft-full */ 9389 if (unlikely(vcpu->kvm->dirty_ring_size && 9390 kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) { 9391 vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL; 9392 trace_kvm_dirty_ring_exit(vcpu); 9393 r = 0; 9394 goto out; 9395 } 9396 9397 if (kvm_request_pending(vcpu)) { 9398 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { 9399 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { 9400 r = 0; 9401 goto out; 9402 } 9403 } 9404 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) 9405 kvm_mmu_unload(vcpu); 9406 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) 9407 __kvm_migrate_timers(vcpu); 9408 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) 9409 kvm_gen_update_masterclock(vcpu->kvm); 9410 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) 9411 kvm_gen_kvmclock_update(vcpu); 9412 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { 9413 r = kvm_guest_time_update(vcpu); 9414 if (unlikely(r)) 9415 goto out; 9416 } 9417 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 9418 kvm_mmu_sync_roots(vcpu); 9419 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) 9420 kvm_mmu_load_pgd(vcpu); 9421 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { 9422 kvm_vcpu_flush_tlb_all(vcpu); 9423 9424 /* Flushing all ASIDs flushes the current ASID... */ 9425 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 9426 } 9427 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 9428 kvm_vcpu_flush_tlb_current(vcpu); 9429 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) 9430 kvm_vcpu_flush_tlb_guest(vcpu); 9431 9432 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { 9433 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; 9434 r = 0; 9435 goto out; 9436 } 9437 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 9438 if (is_guest_mode(vcpu)) { 9439 kvm_x86_ops.nested_ops->triple_fault(vcpu); 9440 } else { 9441 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 9442 vcpu->mmio_needed = 0; 9443 r = 0; 9444 goto out; 9445 } 9446 } 9447 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { 9448 /* Page is swapped out. 
Do synthetic halt */ 9449 vcpu->arch.apf.halted = true; 9450 r = 1; 9451 goto out; 9452 } 9453 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) 9454 record_steal_time(vcpu); 9455 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 9456 process_smi(vcpu); 9457 if (kvm_check_request(KVM_REQ_NMI, vcpu)) 9458 process_nmi(vcpu); 9459 if (kvm_check_request(KVM_REQ_PMU, vcpu)) 9460 kvm_pmu_handle_event(vcpu); 9461 if (kvm_check_request(KVM_REQ_PMI, vcpu)) 9462 kvm_pmu_deliver_pmi(vcpu); 9463 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { 9464 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); 9465 if (test_bit(vcpu->arch.pending_ioapic_eoi, 9466 vcpu->arch.ioapic_handled_vectors)) { 9467 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; 9468 vcpu->run->eoi.vector = 9469 vcpu->arch.pending_ioapic_eoi; 9470 r = 0; 9471 goto out; 9472 } 9473 } 9474 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 9475 vcpu_scan_ioapic(vcpu); 9476 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) 9477 vcpu_load_eoi_exitmap(vcpu); 9478 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) 9479 kvm_vcpu_reload_apic_access_page(vcpu); 9480 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { 9481 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 9482 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; 9483 r = 0; 9484 goto out; 9485 } 9486 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { 9487 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 9488 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; 9489 r = 0; 9490 goto out; 9491 } 9492 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { 9493 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 9494 9495 vcpu->run->exit_reason = KVM_EXIT_HYPERV; 9496 vcpu->run->hyperv = hv_vcpu->exit; 9497 r = 0; 9498 goto out; 9499 } 9500 9501 /* 9502 * KVM_REQ_HV_STIMER has to be processed after 9503 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers 9504 * depend on the guest clock being up-to-date 9505 */ 9506 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) 9507 kvm_hv_process_stimers(vcpu); 9508 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) 9509 kvm_vcpu_update_apicv(vcpu); 9510 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) 9511 kvm_check_async_pf_completion(vcpu); 9512 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) 9513 static_call(kvm_x86_msr_filter_changed)(vcpu); 9514 9515 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) 9516 static_call(kvm_x86_update_cpu_dirty_logging)(vcpu); 9517 } 9518 9519 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || 9520 kvm_xen_has_interrupt(vcpu)) { 9521 ++vcpu->stat.req_event; 9522 r = kvm_apic_accept_events(vcpu); 9523 if (r < 0) { 9524 r = 0; 9525 goto out; 9526 } 9527 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 9528 r = 1; 9529 goto out; 9530 } 9531 9532 r = inject_pending_event(vcpu, &req_immediate_exit); 9533 if (r < 0) { 9534 r = 0; 9535 goto out; 9536 } 9537 if (req_int_win) 9538 static_call(kvm_x86_enable_irq_window)(vcpu); 9539 9540 if (kvm_lapic_enabled(vcpu)) { 9541 update_cr8_intercept(vcpu); 9542 kvm_lapic_sync_to_vapic(vcpu); 9543 } 9544 } 9545 9546 r = kvm_mmu_reload(vcpu); 9547 if (unlikely(r)) { 9548 goto cancel_injection; 9549 } 9550 9551 preempt_disable(); 9552 9553 static_call(kvm_x86_prepare_guest_switch)(vcpu); 9554 9555 /* 9556 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt 9557 * IPI are then delayed after guest entry, which ensures that they 9558 * result in virtual interrupt delivery. 
9559 */ 9560 local_irq_disable(); 9561 vcpu->mode = IN_GUEST_MODE; 9562 9563 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 9564 9565 /* 9566 * 1) We should set ->mode before checking ->requests. Please see 9567 * the comment in kvm_vcpu_exiting_guest_mode(). 9568 * 9569 * 2) For APICv, we should set ->mode before checking PID.ON. This 9570 * pairs with the memory barrier implicit in pi_test_and_set_on 9571 * (see vmx_deliver_posted_interrupt). 9572 * 9573 * 3) This also orders the write to mode from any reads to the page 9574 * tables done while the VCPU is running. Please see the comment 9575 * in kvm_flush_remote_tlbs. 9576 */ 9577 smp_mb__after_srcu_read_unlock(); 9578 9579 /* 9580 * This handles the case where a posted interrupt was 9581 * notified with kvm_vcpu_kick. 9582 */ 9583 if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) 9584 static_call(kvm_x86_sync_pir_to_irr)(vcpu); 9585 9586 if (kvm_vcpu_exit_request(vcpu)) { 9587 vcpu->mode = OUTSIDE_GUEST_MODE; 9588 smp_wmb(); 9589 local_irq_enable(); 9590 preempt_enable(); 9591 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 9592 r = 1; 9593 goto cancel_injection; 9594 } 9595 9596 if (req_immediate_exit) { 9597 kvm_make_request(KVM_REQ_EVENT, vcpu); 9598 static_call(kvm_x86_request_immediate_exit)(vcpu); 9599 } 9600 9601 fpregs_assert_state_consistent(); 9602 if (test_thread_flag(TIF_NEED_FPU_LOAD)) 9603 switch_fpu_return(); 9604 9605 if (unlikely(vcpu->arch.switch_db_regs)) { 9606 set_debugreg(0, 7); 9607 set_debugreg(vcpu->arch.eff_db[0], 0); 9608 set_debugreg(vcpu->arch.eff_db[1], 1); 9609 set_debugreg(vcpu->arch.eff_db[2], 2); 9610 set_debugreg(vcpu->arch.eff_db[3], 3); 9611 set_debugreg(vcpu->arch.dr6, 6); 9612 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; 9613 } else if (unlikely(hw_breakpoint_active())) { 9614 set_debugreg(0, 7); 9615 } 9616 9617 for (;;) { 9618 exit_fastpath = static_call(kvm_x86_run)(vcpu); 9619 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) 9620 break; 9621 9622 if (unlikely(kvm_vcpu_exit_request(vcpu))) { 9623 exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; 9624 break; 9625 } 9626 9627 if (vcpu->arch.apicv_active) 9628 static_call(kvm_x86_sync_pir_to_irr)(vcpu); 9629 } 9630 9631 /* 9632 * Do this here before restoring debug registers on the host. And 9633 * since we do this before handling the vmexit, a DR access vmexit 9634 * can (a) read the correct value of the debug registers, (b) set 9635 * KVM_DEBUGREG_WONT_EXIT again. 9636 */ 9637 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { 9638 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); 9639 static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); 9640 kvm_update_dr0123(vcpu); 9641 kvm_update_dr7(vcpu); 9642 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; 9643 } 9644 9645 /* 9646 * If the guest has used debug registers, at least dr7 9647 * will be disabled while returning to the host. 9648 * If we don't have active breakpoints in the host, we don't 9649 * care about the messed up debug address registers. But if 9650 * we have some of them active, restore the old state. 
9651 */ 9652 if (hw_breakpoint_active()) 9653 hw_breakpoint_restore(); 9654 9655 vcpu->arch.last_vmentry_cpu = vcpu->cpu; 9656 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 9657 9658 vcpu->mode = OUTSIDE_GUEST_MODE; 9659 smp_wmb(); 9660 9661 static_call(kvm_x86_handle_exit_irqoff)(vcpu); 9662 9663 /* 9664 * Consume any pending interrupts, including the possible source of 9665 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. 9666 * An instruction is required after local_irq_enable() to fully unblock 9667 * interrupts on processors that implement an interrupt shadow, the 9668 * stat.exits increment will do nicely. 9669 */ 9670 kvm_before_interrupt(vcpu); 9671 local_irq_enable(); 9672 ++vcpu->stat.exits; 9673 local_irq_disable(); 9674 kvm_after_interrupt(vcpu); 9675 9676 /* 9677 * Wait until after servicing IRQs to account guest time so that any 9678 * ticks that occurred while running the guest are properly accounted 9679 * to the guest. Waiting until IRQs are enabled degrades the accuracy 9680 * of accounting via context tracking, but the loss of accuracy is 9681 * acceptable for all known use cases. 9682 */ 9683 vtime_account_guest_exit(); 9684 9685 if (lapic_in_kernel(vcpu)) { 9686 s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta; 9687 if (delta != S64_MIN) { 9688 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta); 9689 vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN; 9690 } 9691 } 9692 9693 local_irq_enable(); 9694 preempt_enable(); 9695 9696 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 9697 9698 /* 9699 * Profile KVM exit RIPs: 9700 */ 9701 if (unlikely(prof_on == KVM_PROFILING)) { 9702 unsigned long rip = kvm_rip_read(vcpu); 9703 profile_hit(KVM_PROFILING, (void *)rip); 9704 } 9705 9706 if (unlikely(vcpu->arch.tsc_always_catchup)) 9707 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 9708 9709 if (vcpu->arch.apic_attention) 9710 kvm_lapic_sync_from_vapic(vcpu); 9711 9712 r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); 9713 return r; 9714 9715 cancel_injection: 9716 if (req_immediate_exit) 9717 kvm_make_request(KVM_REQ_EVENT, vcpu); 9718 static_call(kvm_x86_cancel_injection)(vcpu); 9719 if (unlikely(vcpu->arch.apic_attention)) 9720 kvm_lapic_sync_from_vapic(vcpu); 9721 out: 9722 return r; 9723 } 9724 9725 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) 9726 { 9727 if (!kvm_arch_vcpu_runnable(vcpu) && 9728 (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) { 9729 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 9730 kvm_vcpu_block(vcpu); 9731 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 9732 9733 if (kvm_x86_ops.post_block) 9734 static_call(kvm_x86_post_block)(vcpu); 9735 9736 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) 9737 return 1; 9738 } 9739 9740 if (kvm_apic_accept_events(vcpu) < 0) 9741 return 0; 9742 switch(vcpu->arch.mp_state) { 9743 case KVM_MP_STATE_HALTED: 9744 case KVM_MP_STATE_AP_RESET_HOLD: 9745 vcpu->arch.pv.pv_unhalted = false; 9746 vcpu->arch.mp_state = 9747 KVM_MP_STATE_RUNNABLE; 9748 fallthrough; 9749 case KVM_MP_STATE_RUNNABLE: 9750 vcpu->arch.apf.halted = false; 9751 break; 9752 case KVM_MP_STATE_INIT_RECEIVED: 9753 break; 9754 default: 9755 return -EINTR; 9756 } 9757 return 1; 9758 } 9759 9760 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) 9761 { 9762 if (is_guest_mode(vcpu)) 9763 kvm_check_nested_events(vcpu); 9764 9765 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 9766 !vcpu->arch.apf.halted); 9767 } 9768 9769 static int 
vcpu_run(struct kvm_vcpu *vcpu) 9770 { 9771 int r; 9772 struct kvm *kvm = vcpu->kvm; 9773 9774 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 9775 vcpu->arch.l1tf_flush_l1d = true; 9776 9777 for (;;) { 9778 if (kvm_vcpu_running(vcpu)) { 9779 r = vcpu_enter_guest(vcpu); 9780 } else { 9781 r = vcpu_block(kvm, vcpu); 9782 } 9783 9784 if (r <= 0) 9785 break; 9786 9787 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu); 9788 if (kvm_cpu_has_pending_timer(vcpu)) 9789 kvm_inject_pending_timer_irqs(vcpu); 9790 9791 if (dm_request_for_irq_injection(vcpu) && 9792 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { 9793 r = 0; 9794 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 9795 ++vcpu->stat.request_irq_exits; 9796 break; 9797 } 9798 9799 if (__xfer_to_guest_mode_work_pending()) { 9800 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 9801 r = xfer_to_guest_mode_handle_work(vcpu); 9802 if (r) 9803 return r; 9804 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 9805 } 9806 } 9807 9808 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 9809 9810 return r; 9811 } 9812 9813 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) 9814 { 9815 int r; 9816 9817 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 9818 r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 9819 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 9820 return r; 9821 } 9822 9823 static int complete_emulated_pio(struct kvm_vcpu *vcpu) 9824 { 9825 BUG_ON(!vcpu->arch.pio.count); 9826 9827 return complete_emulated_io(vcpu); 9828 } 9829 9830 /* 9831 * Implements the following, as a state machine: 9832 * 9833 * read: 9834 * for each fragment 9835 * for each mmio piece in the fragment 9836 * write gpa, len 9837 * exit 9838 * copy data 9839 * execute insn 9840 * 9841 * write: 9842 * for each fragment 9843 * for each mmio piece in the fragment 9844 * write gpa, len 9845 * copy data 9846 * exit 9847 */ 9848 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) 9849 { 9850 struct kvm_run *run = vcpu->run; 9851 struct kvm_mmio_fragment *frag; 9852 unsigned len; 9853 9854 BUG_ON(!vcpu->mmio_needed); 9855 9856 /* Complete previous fragment */ 9857 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 9858 len = min(8u, frag->len); 9859 if (!vcpu->mmio_is_write) 9860 memcpy(frag->data, run->mmio.data, len); 9861 9862 if (frag->len <= 8) { 9863 /* Switch to the next fragment. */ 9864 frag++; 9865 vcpu->mmio_cur_fragment++; 9866 } else { 9867 /* Go forward to the next mmio piece. */ 9868 frag->data += len; 9869 frag->gpa += len; 9870 frag->len -= len; 9871 } 9872 9873 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 9874 vcpu->mmio_needed = 0; 9875 9876 /* FIXME: return into emulator if single-stepping. */ 9877 if (vcpu->mmio_is_write) 9878 return 1; 9879 vcpu->mmio_read_completed = 1; 9880 return complete_emulated_io(vcpu); 9881 } 9882 9883 run->exit_reason = KVM_EXIT_MMIO; 9884 run->mmio.phys_addr = frag->gpa; 9885 if (vcpu->mmio_is_write) 9886 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 9887 run->mmio.len = min(8u, frag->len); 9888 run->mmio.is_write = vcpu->mmio_is_write; 9889 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 9890 return 0; 9891 } 9892 9893 static void kvm_save_current_fpu(struct fpu *fpu) 9894 { 9895 /* 9896 * If the target FPU state is not resident in the CPU registers, just 9897 * memcpy() from current, else save CPU state directly to the target. 
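 *
 * (TIF_NEED_FPU_LOAD set means the FPU registers do not currently hold
 * this task's state, so current->thread.fpu.state is the authoritative
 * copy.)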
9898 */ 9899 if (test_thread_flag(TIF_NEED_FPU_LOAD)) 9900 memcpy(&fpu->state, &current->thread.fpu.state, 9901 fpu_kernel_xstate_size); 9902 else 9903 save_fpregs_to_fpstate(fpu); 9904 } 9905 9906 /* Swap (qemu) user FPU context for the guest FPU context. */ 9907 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 9908 { 9909 fpregs_lock(); 9910 9911 kvm_save_current_fpu(vcpu->arch.user_fpu); 9912 9913 /* 9914 * Guests with protected state can't have it set by the hypervisor, 9915 * so skip trying to set it. 9916 */ 9917 if (vcpu->arch.guest_fpu) 9918 /* PKRU is separately restored in kvm_x86_ops.run. */ 9919 __restore_fpregs_from_fpstate(&vcpu->arch.guest_fpu->state, 9920 ~XFEATURE_MASK_PKRU); 9921 9922 fpregs_mark_activate(); 9923 fpregs_unlock(); 9924 9925 trace_kvm_fpu(1); 9926 } 9927 9928 /* When vcpu_run ends, restore user space FPU context. */ 9929 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 9930 { 9931 fpregs_lock(); 9932 9933 /* 9934 * Guests with protected state can't have it read by the hypervisor, 9935 * so skip trying to save it. 9936 */ 9937 if (vcpu->arch.guest_fpu) 9938 kvm_save_current_fpu(vcpu->arch.guest_fpu); 9939 9940 restore_fpregs_from_fpstate(&vcpu->arch.user_fpu->state); 9941 9942 fpregs_mark_activate(); 9943 fpregs_unlock(); 9944 9945 ++vcpu->stat.fpu_reload; 9946 trace_kvm_fpu(0); 9947 } 9948 9949 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) 9950 { 9951 struct kvm_run *kvm_run = vcpu->run; 9952 int r; 9953 9954 vcpu_load(vcpu); 9955 kvm_sigset_activate(vcpu); 9956 kvm_run->flags = 0; 9957 kvm_load_guest_fpu(vcpu); 9958 9959 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { 9960 if (kvm_run->immediate_exit) { 9961 r = -EINTR; 9962 goto out; 9963 } 9964 kvm_vcpu_block(vcpu); 9965 if (kvm_apic_accept_events(vcpu) < 0) { 9966 r = 0; 9967 goto out; 9968 } 9969 kvm_clear_request(KVM_REQ_UNHALT, vcpu); 9970 r = -EAGAIN; 9971 if (signal_pending(current)) { 9972 r = -EINTR; 9973 kvm_run->exit_reason = KVM_EXIT_INTR; 9974 ++vcpu->stat.signal_exits; 9975 } 9976 goto out; 9977 } 9978 9979 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) { 9980 r = -EINVAL; 9981 goto out; 9982 } 9983 9984 if (kvm_run->kvm_dirty_regs) { 9985 r = sync_regs(vcpu); 9986 if (r != 0) 9987 goto out; 9988 } 9989 9990 /* re-sync apic's tpr */ 9991 if (!lapic_in_kernel(vcpu)) { 9992 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { 9993 r = -EINVAL; 9994 goto out; 9995 } 9996 } 9997 9998 if (unlikely(vcpu->arch.complete_userspace_io)) { 9999 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; 10000 vcpu->arch.complete_userspace_io = NULL; 10001 r = cui(vcpu); 10002 if (r <= 0) 10003 goto out; 10004 } else 10005 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); 10006 10007 if (kvm_run->immediate_exit) 10008 r = -EINTR; 10009 else 10010 r = vcpu_run(vcpu); 10011 10012 out: 10013 kvm_put_guest_fpu(vcpu); 10014 if (kvm_run->kvm_valid_regs) 10015 store_regs(vcpu); 10016 post_kvm_run_save(vcpu); 10017 kvm_sigset_deactivate(vcpu); 10018 10019 vcpu_put(vcpu); 10020 return r; 10021 } 10022 10023 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10024 { 10025 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { 10026 /* 10027 * We are here if userspace calls get_regs() in the middle of 10028 * instruction emulation. Register state needs to be copied 10029 * back from emulation context to vcpu.
Userspace shouldn't do 10030 * that usually, but some bad designed PV devices (vmware 10031 * backdoor interface) need this to work 10032 */ 10033 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); 10034 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 10035 } 10036 regs->rax = kvm_rax_read(vcpu); 10037 regs->rbx = kvm_rbx_read(vcpu); 10038 regs->rcx = kvm_rcx_read(vcpu); 10039 regs->rdx = kvm_rdx_read(vcpu); 10040 regs->rsi = kvm_rsi_read(vcpu); 10041 regs->rdi = kvm_rdi_read(vcpu); 10042 regs->rsp = kvm_rsp_read(vcpu); 10043 regs->rbp = kvm_rbp_read(vcpu); 10044 #ifdef CONFIG_X86_64 10045 regs->r8 = kvm_r8_read(vcpu); 10046 regs->r9 = kvm_r9_read(vcpu); 10047 regs->r10 = kvm_r10_read(vcpu); 10048 regs->r11 = kvm_r11_read(vcpu); 10049 regs->r12 = kvm_r12_read(vcpu); 10050 regs->r13 = kvm_r13_read(vcpu); 10051 regs->r14 = kvm_r14_read(vcpu); 10052 regs->r15 = kvm_r15_read(vcpu); 10053 #endif 10054 10055 regs->rip = kvm_rip_read(vcpu); 10056 regs->rflags = kvm_get_rflags(vcpu); 10057 } 10058 10059 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10060 { 10061 vcpu_load(vcpu); 10062 __get_regs(vcpu, regs); 10063 vcpu_put(vcpu); 10064 return 0; 10065 } 10066 10067 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10068 { 10069 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; 10070 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 10071 10072 kvm_rax_write(vcpu, regs->rax); 10073 kvm_rbx_write(vcpu, regs->rbx); 10074 kvm_rcx_write(vcpu, regs->rcx); 10075 kvm_rdx_write(vcpu, regs->rdx); 10076 kvm_rsi_write(vcpu, regs->rsi); 10077 kvm_rdi_write(vcpu, regs->rdi); 10078 kvm_rsp_write(vcpu, regs->rsp); 10079 kvm_rbp_write(vcpu, regs->rbp); 10080 #ifdef CONFIG_X86_64 10081 kvm_r8_write(vcpu, regs->r8); 10082 kvm_r9_write(vcpu, regs->r9); 10083 kvm_r10_write(vcpu, regs->r10); 10084 kvm_r11_write(vcpu, regs->r11); 10085 kvm_r12_write(vcpu, regs->r12); 10086 kvm_r13_write(vcpu, regs->r13); 10087 kvm_r14_write(vcpu, regs->r14); 10088 kvm_r15_write(vcpu, regs->r15); 10089 #endif 10090 10091 kvm_rip_write(vcpu, regs->rip); 10092 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); 10093 10094 vcpu->arch.exception.pending = false; 10095 10096 kvm_make_request(KVM_REQ_EVENT, vcpu); 10097 } 10098 10099 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 10100 { 10101 vcpu_load(vcpu); 10102 __set_regs(vcpu, regs); 10103 vcpu_put(vcpu); 10104 return 0; 10105 } 10106 10107 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 10108 { 10109 struct kvm_segment cs; 10110 10111 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 10112 *db = cs.db; 10113 *l = cs.l; 10114 } 10115 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits); 10116 10117 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 10118 { 10119 struct desc_ptr dt; 10120 10121 if (vcpu->arch.guest_state_protected) 10122 goto skip_protected_regs; 10123 10124 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 10125 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 10126 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); 10127 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 10128 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 10129 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 10130 10131 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 10132 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 10133 10134 static_call(kvm_x86_get_idt)(vcpu, &dt); 10135 sregs->idt.limit = dt.size; 10136 sregs->idt.base = dt.address; 10137 
static_call(kvm_x86_get_gdt)(vcpu, &dt); 10138 sregs->gdt.limit = dt.size; 10139 sregs->gdt.base = dt.address; 10140 10141 sregs->cr2 = vcpu->arch.cr2; 10142 sregs->cr3 = kvm_read_cr3(vcpu); 10143 10144 skip_protected_regs: 10145 sregs->cr0 = kvm_read_cr0(vcpu); 10146 sregs->cr4 = kvm_read_cr4(vcpu); 10147 sregs->cr8 = kvm_get_cr8(vcpu); 10148 sregs->efer = vcpu->arch.efer; 10149 sregs->apic_base = kvm_get_apic_base(vcpu); 10150 } 10151 10152 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 10153 { 10154 __get_sregs_common(vcpu, sregs); 10155 10156 if (vcpu->arch.guest_state_protected) 10157 return; 10158 10159 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) 10160 set_bit(vcpu->arch.interrupt.nr, 10161 (unsigned long *)sregs->interrupt_bitmap); 10162 } 10163 10164 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 10165 { 10166 int i; 10167 10168 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2); 10169 10170 if (vcpu->arch.guest_state_protected) 10171 return; 10172 10173 if (is_pae_paging(vcpu)) { 10174 for (i = 0 ; i < 4 ; i++) 10175 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); 10176 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; 10177 } 10178 } 10179 10180 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 10181 struct kvm_sregs *sregs) 10182 { 10183 vcpu_load(vcpu); 10184 __get_sregs(vcpu, sregs); 10185 vcpu_put(vcpu); 10186 return 0; 10187 } 10188 10189 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 10190 struct kvm_mp_state *mp_state) 10191 { 10192 int r; 10193 10194 vcpu_load(vcpu); 10195 if (kvm_mpx_supported()) 10196 kvm_load_guest_fpu(vcpu); 10197 10198 r = kvm_apic_accept_events(vcpu); 10199 if (r < 0) 10200 goto out; 10201 r = 0; 10202 10203 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || 10204 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && 10205 vcpu->arch.pv.pv_unhalted) 10206 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; 10207 else 10208 mp_state->mp_state = vcpu->arch.mp_state; 10209 10210 out: 10211 if (kvm_mpx_supported()) 10212 kvm_put_guest_fpu(vcpu); 10213 vcpu_put(vcpu); 10214 return r; 10215 } 10216 10217 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 10218 struct kvm_mp_state *mp_state) 10219 { 10220 int ret = -EINVAL; 10221 10222 vcpu_load(vcpu); 10223 10224 if (!lapic_in_kernel(vcpu) && 10225 mp_state->mp_state != KVM_MP_STATE_RUNNABLE) 10226 goto out; 10227 10228 /* 10229 * KVM_MP_STATE_INIT_RECEIVED means the processor is in 10230 * INIT state; latched init should be reported using 10231 * KVM_SET_VCPU_EVENTS, so reject it here. 
10232 */ 10233 if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) && 10234 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || 10235 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) 10236 goto out; 10237 10238 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 10239 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 10240 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); 10241 } else 10242 vcpu->arch.mp_state = mp_state->mp_state; 10243 kvm_make_request(KVM_REQ_EVENT, vcpu); 10244 10245 ret = 0; 10246 out: 10247 vcpu_put(vcpu); 10248 return ret; 10249 } 10250 10251 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, 10252 int reason, bool has_error_code, u32 error_code) 10253 { 10254 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 10255 int ret; 10256 10257 init_emulate_ctxt(vcpu); 10258 10259 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, 10260 has_error_code, error_code); 10261 if (ret) { 10262 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 10263 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 10264 vcpu->run->internal.ndata = 0; 10265 return 0; 10266 } 10267 10268 kvm_rip_write(vcpu, ctxt->eip); 10269 kvm_set_rflags(vcpu, ctxt->eflags); 10270 return 1; 10271 } 10272 EXPORT_SYMBOL_GPL(kvm_task_switch); 10273 10274 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 10275 { 10276 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { 10277 /* 10278 * When EFER.LME and CR0.PG are set, the processor is in 10279 * 64-bit mode (though maybe in a 32-bit code segment). 10280 * CR4.PAE and EFER.LMA must be set. 10281 */ 10282 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) 10283 return false; 10284 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) 10285 return false; 10286 } else { 10287 /* 10288 * Not in 64-bit mode: EFER.LMA is clear and the code 10289 * segment cannot be 64-bit. 
10290 */ 10291 if (sregs->efer & EFER_LMA || sregs->cs.l) 10292 return false; 10293 } 10294 10295 return kvm_is_valid_cr4(vcpu, sregs->cr4); 10296 } 10297 10298 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, 10299 int *mmu_reset_needed, bool update_pdptrs) 10300 { 10301 struct msr_data apic_base_msr; 10302 int idx; 10303 struct desc_ptr dt; 10304 10305 if (!kvm_is_valid_sregs(vcpu, sregs)) 10306 return -EINVAL; 10307 10308 apic_base_msr.data = sregs->apic_base; 10309 apic_base_msr.host_initiated = true; 10310 if (kvm_set_apic_base(vcpu, &apic_base_msr)) 10311 return -EINVAL; 10312 10313 if (vcpu->arch.guest_state_protected) 10314 return 0; 10315 10316 dt.size = sregs->idt.limit; 10317 dt.address = sregs->idt.base; 10318 static_call(kvm_x86_set_idt)(vcpu, &dt); 10319 dt.size = sregs->gdt.limit; 10320 dt.address = sregs->gdt.base; 10321 static_call(kvm_x86_set_gdt)(vcpu, &dt); 10322 10323 vcpu->arch.cr2 = sregs->cr2; 10324 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; 10325 vcpu->arch.cr3 = sregs->cr3; 10326 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 10327 10328 kvm_set_cr8(vcpu, sregs->cr8); 10329 10330 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 10331 static_call(kvm_x86_set_efer)(vcpu, sregs->efer); 10332 10333 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 10334 static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); 10335 vcpu->arch.cr0 = sregs->cr0; 10336 10337 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 10338 static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); 10339 10340 if (update_pdptrs) { 10341 idx = srcu_read_lock(&vcpu->kvm->srcu); 10342 if (is_pae_paging(vcpu)) { 10343 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); 10344 *mmu_reset_needed = 1; 10345 } 10346 srcu_read_unlock(&vcpu->kvm->srcu, idx); 10347 } 10348 10349 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 10350 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 10351 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); 10352 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 10353 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 10354 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 10355 10356 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 10357 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 10358 10359 update_cr8_intercept(vcpu); 10360 10361 /* Older userspace won't unhalt the vcpu on reset. 
*/ 10362 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && 10363 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 10364 !is_protmode(vcpu)) 10365 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 10366 10367 return 0; 10368 } 10369 10370 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 10371 { 10372 int pending_vec, max_bits; 10373 int mmu_reset_needed = 0; 10374 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true); 10375 10376 if (ret) 10377 return ret; 10378 10379 if (mmu_reset_needed) 10380 kvm_mmu_reset_context(vcpu); 10381 10382 max_bits = KVM_NR_INTERRUPTS; 10383 pending_vec = find_first_bit( 10384 (const unsigned long *)sregs->interrupt_bitmap, max_bits); 10385 10386 if (pending_vec < max_bits) { 10387 kvm_queue_interrupt(vcpu, pending_vec, false); 10388 pr_debug("Set back pending irq %d\n", pending_vec); 10389 kvm_make_request(KVM_REQ_EVENT, vcpu); 10390 } 10391 return 0; 10392 } 10393 10394 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 10395 { 10396 int mmu_reset_needed = 0; 10397 bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; 10398 bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) && 10399 !(sregs2->efer & EFER_LMA); 10400 int i, ret; 10401 10402 if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID) 10403 return -EINVAL; 10404 10405 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) 10406 return -EINVAL; 10407 10408 ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2, 10409 &mmu_reset_needed, !valid_pdptrs); 10410 if (ret) 10411 return ret; 10412 10413 if (valid_pdptrs) { 10414 for (i = 0; i < 4 ; i++) 10415 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]); 10416 10417 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); 10418 mmu_reset_needed = 1; 10419 vcpu->arch.pdptrs_from_userspace = true; 10420 } 10421 if (mmu_reset_needed) 10422 kvm_mmu_reset_context(vcpu); 10423 return 0; 10424 } 10425 10426 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 10427 struct kvm_sregs *sregs) 10428 { 10429 int ret; 10430 10431 vcpu_load(vcpu); 10432 ret = __set_sregs(vcpu, sregs); 10433 vcpu_put(vcpu); 10434 return ret; 10435 } 10436 10437 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 10438 struct kvm_guest_debug *dbg) 10439 { 10440 unsigned long rflags; 10441 int i, r; 10442 10443 if (vcpu->arch.guest_state_protected) 10444 return -EINVAL; 10445 10446 vcpu_load(vcpu); 10447 10448 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { 10449 r = -EBUSY; 10450 if (vcpu->arch.exception.pending) 10451 goto out; 10452 if (dbg->control & KVM_GUESTDBG_INJECT_DB) 10453 kvm_queue_exception(vcpu, DB_VECTOR); 10454 else 10455 kvm_queue_exception(vcpu, BP_VECTOR); 10456 } 10457 10458 /* 10459 * Read rflags as long as potentially injected trace flags are still 10460 * filtered out. 
10461 */ 10462 rflags = kvm_get_rflags(vcpu); 10463 10464 vcpu->guest_debug = dbg->control; 10465 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) 10466 vcpu->guest_debug = 0; 10467 10468 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 10469 for (i = 0; i < KVM_NR_DB_REGS; ++i) 10470 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; 10471 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; 10472 } else { 10473 for (i = 0; i < KVM_NR_DB_REGS; i++) 10474 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; 10475 } 10476 kvm_update_dr7(vcpu); 10477 10478 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 10479 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); 10480 10481 /* 10482 * Trigger an rflags update that will inject or remove the trace 10483 * flags. 10484 */ 10485 kvm_set_rflags(vcpu, rflags); 10486 10487 static_call(kvm_x86_update_exception_bitmap)(vcpu); 10488 10489 r = 0; 10490 10491 out: 10492 vcpu_put(vcpu); 10493 return r; 10494 } 10495 10496 /* 10497 * Translate a guest virtual address to a guest physical address. 10498 */ 10499 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 10500 struct kvm_translation *tr) 10501 { 10502 unsigned long vaddr = tr->linear_address; 10503 gpa_t gpa; 10504 int idx; 10505 10506 vcpu_load(vcpu); 10507 10508 idx = srcu_read_lock(&vcpu->kvm->srcu); 10509 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); 10510 srcu_read_unlock(&vcpu->kvm->srcu, idx); 10511 tr->physical_address = gpa; 10512 tr->valid = gpa != UNMAPPED_GVA; 10513 tr->writeable = 1; 10514 tr->usermode = 0; 10515 10516 vcpu_put(vcpu); 10517 return 0; 10518 } 10519 10520 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 10521 { 10522 struct fxregs_state *fxsave; 10523 10524 if (!vcpu->arch.guest_fpu) 10525 return 0; 10526 10527 vcpu_load(vcpu); 10528 10529 fxsave = &vcpu->arch.guest_fpu->state.fxsave; 10530 memcpy(fpu->fpr, fxsave->st_space, 128); 10531 fpu->fcw = fxsave->cwd; 10532 fpu->fsw = fxsave->swd; 10533 fpu->ftwx = fxsave->twd; 10534 fpu->last_opcode = fxsave->fop; 10535 fpu->last_ip = fxsave->rip; 10536 fpu->last_dp = fxsave->rdp; 10537 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); 10538 10539 vcpu_put(vcpu); 10540 return 0; 10541 } 10542 10543 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 10544 { 10545 struct fxregs_state *fxsave; 10546 10547 if (!vcpu->arch.guest_fpu) 10548 return 0; 10549 10550 vcpu_load(vcpu); 10551 10552 fxsave = &vcpu->arch.guest_fpu->state.fxsave; 10553 10554 memcpy(fxsave->st_space, fpu->fpr, 128); 10555 fxsave->cwd = fpu->fcw; 10556 fxsave->swd = fpu->fsw; 10557 fxsave->twd = fpu->ftwx; 10558 fxsave->fop = fpu->last_opcode; 10559 fxsave->rip = fpu->last_ip; 10560 fxsave->rdp = fpu->last_dp; 10561 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); 10562 10563 vcpu_put(vcpu); 10564 return 0; 10565 } 10566 10567 static void store_regs(struct kvm_vcpu *vcpu) 10568 { 10569 BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES); 10570 10571 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) 10572 __get_regs(vcpu, &vcpu->run->s.regs.regs); 10573 10574 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) 10575 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); 10576 10577 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) 10578 kvm_vcpu_ioctl_x86_get_vcpu_events( 10579 vcpu, &vcpu->run->s.regs.events); 10580 } 10581 10582 static int sync_regs(struct kvm_vcpu *vcpu) 10583 { 10584 if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS) 10585 return -EINVAL; 10586 10587 if 
(vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { 10588 __set_regs(vcpu, &vcpu->run->s.regs.regs); 10589 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; 10590 } 10591 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { 10592 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) 10593 return -EINVAL; 10594 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; 10595 } 10596 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { 10597 if (kvm_vcpu_ioctl_x86_set_vcpu_events( 10598 vcpu, &vcpu->run->s.regs.events)) 10599 return -EINVAL; 10600 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; 10601 } 10602 10603 return 0; 10604 } 10605 10606 static void fx_init(struct kvm_vcpu *vcpu) 10607 { 10608 if (!vcpu->arch.guest_fpu) 10609 return; 10610 10611 fpstate_init(&vcpu->arch.guest_fpu->state); 10612 if (boot_cpu_has(X86_FEATURE_XSAVES)) 10613 vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv = 10614 host_xcr0 | XSTATE_COMPACTION_ENABLED; 10615 10616 /* 10617 * Ensure guest xcr0 is valid for loading 10618 */ 10619 vcpu->arch.xcr0 = XFEATURE_MASK_FP; 10620 10621 vcpu->arch.cr0 |= X86_CR0_ET; 10622 } 10623 10624 void kvm_free_guest_fpu(struct kvm_vcpu *vcpu) 10625 { 10626 if (vcpu->arch.guest_fpu) { 10627 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); 10628 vcpu->arch.guest_fpu = NULL; 10629 } 10630 } 10631 EXPORT_SYMBOL_GPL(kvm_free_guest_fpu); 10632 10633 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) 10634 { 10635 if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) 10636 pr_warn_once("kvm: SMP vm created on host with unstable TSC; " 10637 "guest TSC will not be reliable\n"); 10638 10639 return 0; 10640 } 10641 10642 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) 10643 { 10644 struct page *page; 10645 int r; 10646 10647 vcpu->arch.last_vmentry_cpu = -1; 10648 10649 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) 10650 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 10651 else 10652 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; 10653 10654 r = kvm_mmu_create(vcpu); 10655 if (r < 0) 10656 return r; 10657 10658 if (irqchip_in_kernel(vcpu->kvm)) { 10659 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); 10660 if (r < 0) 10661 goto fail_mmu_destroy; 10662 if (kvm_apicv_activated(vcpu->kvm)) 10663 vcpu->arch.apicv_active = true; 10664 } else 10665 static_branch_inc(&kvm_has_noapic_vcpu); 10666 10667 r = -ENOMEM; 10668 10669 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 10670 if (!page) 10671 goto fail_free_lapic; 10672 vcpu->arch.pio_data = page_address(page); 10673 10674 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, 10675 GFP_KERNEL_ACCOUNT); 10676 if (!vcpu->arch.mce_banks) 10677 goto fail_free_pio_data; 10678 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; 10679 10680 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, 10681 GFP_KERNEL_ACCOUNT)) 10682 goto fail_free_mce_banks; 10683 10684 if (!alloc_emulate_ctxt(vcpu)) 10685 goto free_wbinvd_dirty_mask; 10686 10687 vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, 10688 GFP_KERNEL_ACCOUNT); 10689 if (!vcpu->arch.user_fpu) { 10690 pr_err("kvm: failed to allocate userspace's fpu\n"); 10691 goto free_emulate_ctxt; 10692 } 10693 10694 vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, 10695 GFP_KERNEL_ACCOUNT); 10696 if (!vcpu->arch.guest_fpu) { 10697 pr_err("kvm: failed to allocate vcpu's fpu\n"); 10698 goto free_user_fpu; 10699 } 10700 fx_init(vcpu); 10701 10702 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); 10703 vcpu->arch.reserved_gpa_bits = 
kvm_vcpu_reserved_gpa_bits_raw(vcpu); 10704 10705 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; 10706 10707 kvm_async_pf_hash_reset(vcpu); 10708 kvm_pmu_init(vcpu); 10709 10710 vcpu->arch.pending_external_vector = -1; 10711 vcpu->arch.preempted_in_kernel = false; 10712 10713 #if IS_ENABLED(CONFIG_HYPERV) 10714 vcpu->arch.hv_root_tdp = INVALID_PAGE; 10715 #endif 10716 10717 r = static_call(kvm_x86_vcpu_create)(vcpu); 10718 if (r) 10719 goto free_guest_fpu; 10720 10721 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); 10722 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; 10723 kvm_vcpu_mtrr_init(vcpu); 10724 vcpu_load(vcpu); 10725 kvm_set_tsc_khz(vcpu, max_tsc_khz); 10726 kvm_vcpu_reset(vcpu, false); 10727 kvm_init_mmu(vcpu); 10728 vcpu_put(vcpu); 10729 return 0; 10730 10731 free_guest_fpu: 10732 kvm_free_guest_fpu(vcpu); 10733 free_user_fpu: 10734 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); 10735 free_emulate_ctxt: 10736 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 10737 free_wbinvd_dirty_mask: 10738 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 10739 fail_free_mce_banks: 10740 kfree(vcpu->arch.mce_banks); 10741 fail_free_pio_data: 10742 free_page((unsigned long)vcpu->arch.pio_data); 10743 fail_free_lapic: 10744 kvm_free_lapic(vcpu); 10745 fail_mmu_destroy: 10746 kvm_mmu_destroy(vcpu); 10747 return r; 10748 } 10749 10750 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 10751 { 10752 struct kvm *kvm = vcpu->kvm; 10753 10754 if (mutex_lock_killable(&vcpu->mutex)) 10755 return; 10756 vcpu_load(vcpu); 10757 kvm_synchronize_tsc(vcpu, 0); 10758 vcpu_put(vcpu); 10759 10760 /* poll control enabled by default */ 10761 vcpu->arch.msr_kvm_poll_control = 1; 10762 10763 mutex_unlock(&vcpu->mutex); 10764 10765 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) 10766 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 10767 KVMCLOCK_SYNC_PERIOD); 10768 } 10769 10770 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 10771 { 10772 struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache; 10773 int idx; 10774 10775 kvm_release_pfn(cache->pfn, cache->dirty, cache); 10776 10777 kvmclock_reset(vcpu); 10778 10779 static_call(kvm_x86_vcpu_free)(vcpu); 10780 10781 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 10782 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); 10783 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); 10784 kvm_free_guest_fpu(vcpu); 10785 10786 kvm_hv_vcpu_uninit(vcpu); 10787 kvm_pmu_destroy(vcpu); 10788 kfree(vcpu->arch.mce_banks); 10789 kvm_free_lapic(vcpu); 10790 idx = srcu_read_lock(&vcpu->kvm->srcu); 10791 kvm_mmu_destroy(vcpu); 10792 srcu_read_unlock(&vcpu->kvm->srcu, idx); 10793 free_page((unsigned long)vcpu->arch.pio_data); 10794 kvfree(vcpu->arch.cpuid_entries); 10795 if (!lapic_in_kernel(vcpu)) 10796 static_branch_dec(&kvm_has_noapic_vcpu); 10797 } 10798 10799 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 10800 { 10801 unsigned long old_cr0 = kvm_read_cr0(vcpu); 10802 10803 kvm_lapic_reset(vcpu, init_event); 10804 10805 vcpu->arch.hflags = 0; 10806 10807 vcpu->arch.smi_pending = 0; 10808 vcpu->arch.smi_count = 0; 10809 atomic_set(&vcpu->arch.nmi_queued, 0); 10810 vcpu->arch.nmi_pending = 0; 10811 vcpu->arch.nmi_injected = false; 10812 kvm_clear_interrupt_queue(vcpu); 10813 kvm_clear_exception_queue(vcpu); 10814 10815 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); 10816 kvm_update_dr0123(vcpu); 10817 vcpu->arch.dr6 = DR6_ACTIVE_LOW; 10818 vcpu->arch.dr7 = DR7_FIXED_1; 10819 kvm_update_dr7(vcpu); 10820 
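/* Below: clear CR2, raise KVM_REQ_EVENT to reevaluate pending events, and reset the per-vCPU paravirt state (async page fault MSRs and queued faults, steal time MSR, kvmclock). */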
10821 vcpu->arch.cr2 = 0; 10822 10823 kvm_make_request(KVM_REQ_EVENT, vcpu); 10824 vcpu->arch.apf.msr_en_val = 0; 10825 vcpu->arch.apf.msr_int_val = 0; 10826 vcpu->arch.st.msr_val = 0; 10827 10828 kvmclock_reset(vcpu); 10829 10830 kvm_clear_async_pf_completion_queue(vcpu); 10831 kvm_async_pf_hash_reset(vcpu); 10832 vcpu->arch.apf.halted = false; 10833 10834 if (vcpu->arch.guest_fpu && kvm_mpx_supported()) { 10835 void *mpx_state_buffer; 10836 10837 /* 10838 * Avoid having the INIT path from kvm_apic_has_events() be called 10839 * with the FPU loaded, as that would not let userspace fix the state. 10840 */ 10841 if (init_event) 10842 kvm_put_guest_fpu(vcpu); 10843 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, 10844 XFEATURE_BNDREGS); 10845 if (mpx_state_buffer) 10846 memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state)); 10847 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, 10848 XFEATURE_BNDCSR); 10849 if (mpx_state_buffer) 10850 memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr)); 10851 if (init_event) 10852 kvm_load_guest_fpu(vcpu); 10853 } 10854 10855 if (!init_event) { 10856 kvm_pmu_reset(vcpu); 10857 vcpu->arch.smbase = 0x30000; 10858 10859 vcpu->arch.msr_misc_features_enables = 0; 10860 10861 vcpu->arch.xcr0 = XFEATURE_MASK_FP; 10862 } 10863 10864 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); 10865 vcpu->arch.regs_avail = ~0; 10866 vcpu->arch.regs_dirty = ~0; 10867 10868 vcpu->arch.ia32_xss = 0; 10869 10870 static_call(kvm_x86_vcpu_reset)(vcpu, init_event); 10871 10872 /* 10873 * Reset the MMU context if paging was enabled prior to INIT (which is 10874 * implied if CR0.PG=1 as CR0 will be '0' prior to RESET). Unlike the 10875 * standard CR0/CR4/EFER modification paths, only CR0.PG needs to be 10876 * checked because it is unconditionally cleared on INIT and all other 10877 * paging related bits are ignored if paging is disabled, i.e. CR0.WP, 10878 * CR4, and EFER changes are all irrelevant if CR0.PG was '0'. 10879 */ 10880 if (old_cr0 & X86_CR0_PG) 10881 kvm_mmu_reset_context(vcpu); 10882 } 10883 10884 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 10885 { 10886 struct kvm_segment cs; 10887 10888 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); 10889 cs.selector = vector << 8; 10890 cs.base = vector << 12; 10891 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 10892 kvm_rip_write(vcpu, 0); 10893 } 10894 EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector); 10895 10896 int kvm_arch_hardware_enable(void) 10897 { 10898 struct kvm *kvm; 10899 struct kvm_vcpu *vcpu; 10900 int i; 10901 int ret; 10902 u64 local_tsc; 10903 u64 max_tsc = 0; 10904 bool stable, backwards_tsc = false; 10905 10906 kvm_user_return_msr_cpu_online(); 10907 ret = static_call(kvm_x86_hardware_enable)(); 10908 if (ret != 0) 10909 return ret; 10910 10911 local_tsc = rdtsc(); 10912 stable = !kvm_check_tsc_unstable(); 10913 list_for_each_entry(kvm, &vm_list, vm_list) { 10914 kvm_for_each_vcpu(i, vcpu, kvm) { 10915 if (!stable && vcpu->cpu == smp_processor_id()) 10916 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 10917 if (stable && vcpu->arch.last_host_tsc > local_tsc) { 10918 backwards_tsc = true; 10919 if (vcpu->arch.last_host_tsc > max_tsc) 10920 max_tsc = vcpu->arch.last_host_tsc; 10921 } 10922 } 10923 } 10924 10925 /* 10926 * Sometimes, even reliable TSCs go backwards. This happens on 10927 * platforms that reset TSC during suspend or hibernate actions, but 10928 * maintain synchronization. We must compensate. Fortunately, we can 10929 * detect that condition here, which happens early in CPU bringup, 10930 * before any KVM threads can be running. Unfortunately, we can't 10931 * bring the TSCs fully up to date with real time, as we aren't yet far 10932 * enough into CPU bringup that we know how much real time has actually 10933 * elapsed; our helper function, ktime_get_boottime_ns() will be using boot 10934 * variables that haven't been updated yet. 10935 * 10936 * So we simply find the maximum observed TSC above, then record the 10937 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, 10938 * the adjustment will be applied. Note that we accumulate 10939 * adjustments, in case multiple suspend cycles happen before some VCPU 10940 * gets a chance to run again. In the event that no KVM threads get a 10941 * chance to run, we will miss the entire elapsed period, as we'll have 10942 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may 10943 * lose cycle time. This isn't too big a deal, since the loss will be 10944 * uniform across all VCPUs (not to mention the scenario is extremely 10945 * unlikely). It is possible that a second hibernate recovery happens 10946 * much faster than a first, causing the observed TSC here to be 10947 * smaller; this would require additional padding adjustment, which is 10948 * why we set last_host_tsc to the local tsc observed here. 10949 * 10950 * N.B. - this code below runs only on platforms with reliable TSC, 10951 * as that is the only way backwards_tsc is set above. Also note 10952 * that this runs for ALL vcpus, which is not a bug; all VCPUs should 10953 * have the same delta_cyc adjustment applied if backwards_tsc 10954 * is detected. Note further, this adjustment is only done once, 10955 * as we reset last_host_tsc on all VCPUs to stop this from being 10956 * called multiple times (one for each physical CPU bringup). 10957 * 10958 * Platforms with unreliable TSCs don't have to deal with this, they 10959 * will be compensated by the logic in vcpu_load, which sets the TSC to 10960 * catchup mode. This will catch up all VCPUs to real time, but cannot 10961 * guarantee that they stay in perfect synchronization. 10962 */ 10963 if (backwards_tsc) { 10964 u64 delta_cyc = max_tsc - local_tsc; 10965 list_for_each_entry(kvm, &vm_list, vm_list) { 10966 kvm->arch.backwards_tsc_observed = true; 10967 kvm_for_each_vcpu(i, vcpu, kvm) { 10968 vcpu->arch.tsc_offset_adjustment += delta_cyc; 10969 vcpu->arch.last_host_tsc = local_tsc; 10970 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 10971 } 10972 10973 /* 10974 * We have to disable TSC offset matching: if you were 10975 * booting a VM while issuing an S4 host suspend, 10976 * you may have some problem. Solving this issue is 10977 * left as an exercise to the reader.
10978 */ 10979 kvm->arch.last_tsc_nsec = 0; 10980 kvm->arch.last_tsc_write = 0; 10981 } 10982 10983 } 10984 return 0; 10985 } 10986 10987 void kvm_arch_hardware_disable(void) 10988 { 10989 static_call(kvm_x86_hardware_disable)(); 10990 drop_user_return_notifiers(); 10991 } 10992 10993 int kvm_arch_hardware_setup(void *opaque) 10994 { 10995 struct kvm_x86_init_ops *ops = opaque; 10996 int r; 10997 10998 rdmsrl_safe(MSR_EFER, &host_efer); 10999 11000 if (boot_cpu_has(X86_FEATURE_XSAVES)) 11001 rdmsrl(MSR_IA32_XSS, host_xss); 11002 11003 r = ops->hardware_setup(); 11004 if (r != 0) 11005 return r; 11006 11007 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); 11008 kvm_ops_static_call_update(); 11009 11010 if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) 11011 supported_xss = 0; 11012 11013 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f) 11014 cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_); 11015 #undef __kvm_cpu_cap_has 11016 11017 if (kvm_has_tsc_control) { 11018 /* 11019 * Make sure the user can only configure tsc_khz values that 11020 * fit into a signed integer. 11021 * A min value is not calculated because it will always 11022 * be 1 on all machines. 11023 */ 11024 u64 max = min(0x7fffffffULL, 11025 __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz)); 11026 kvm_max_guest_tsc_khz = max; 11027 11028 kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits; 11029 } 11030 11031 kvm_init_msr_list(); 11032 return 0; 11033 } 11034 11035 void kvm_arch_hardware_unsetup(void) 11036 { 11037 static_call(kvm_x86_hardware_unsetup)(); 11038 } 11039 11040 int kvm_arch_check_processor_compat(void *opaque) 11041 { 11042 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); 11043 struct kvm_x86_init_ops *ops = opaque; 11044 11045 WARN_ON(!irqs_disabled()); 11046 11047 if (__cr4_reserved_bits(cpu_has, c) != 11048 __cr4_reserved_bits(cpu_has, &boot_cpu_data)) 11049 return -EIO; 11050 11051 return ops->check_processor_compatibility(); 11052 } 11053 11054 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) 11055 { 11056 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; 11057 } 11058 EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp); 11059 11060 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) 11061 { 11062 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; 11063 } 11064 11065 __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); 11066 EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); 11067 11068 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) 11069 { 11070 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 11071 11072 vcpu->arch.l1tf_flush_l1d = true; 11073 if (pmu->version && unlikely(pmu->event_count)) { 11074 pmu->need_cleanup = true; 11075 kvm_make_request(KVM_REQ_PMU, vcpu); 11076 } 11077 static_call(kvm_x86_sched_in)(vcpu, cpu); 11078 } 11079 11080 void kvm_arch_free_vm(struct kvm *kvm) 11081 { 11082 kfree(to_kvm_hv(kvm)->hv_pa_pg); 11083 vfree(kvm); 11084 } 11085 11086 11087 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 11088 { 11089 if (type) 11090 return -EINVAL; 11091 11092 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); 11093 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); 11094 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); 11095 INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); 11096 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 11097 atomic_set(&kvm->arch.noncoherent_dma_count, 0); 11098 11099 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 11100 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); 11101 /* 
Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ 11102 set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, 11103 &kvm->arch.irq_sources_bitmap); 11104 11105 raw_spin_lock_init(&kvm->arch.tsc_write_lock); 11106 mutex_init(&kvm->arch.apic_map_lock); 11107 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); 11108 11109 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); 11110 pvclock_update_vm_gtod_copy(kvm); 11111 11112 kvm->arch.guest_can_read_msr_platform_info = true; 11113 11114 #if IS_ENABLED(CONFIG_HYPERV) 11115 spin_lock_init(&kvm->arch.hv_root_tdp_lock); 11116 kvm->arch.hv_root_tdp = INVALID_PAGE; 11117 #endif 11118 11119 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); 11120 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); 11121 11122 kvm_apicv_init(kvm); 11123 kvm_hv_init_vm(kvm); 11124 kvm_page_track_init(kvm); 11125 kvm_mmu_init_vm(kvm); 11126 11127 return static_call(kvm_x86_vm_init)(kvm); 11128 } 11129 11130 int kvm_arch_post_init_vm(struct kvm *kvm) 11131 { 11132 return kvm_mmu_post_init_vm(kvm); 11133 } 11134 11135 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) 11136 { 11137 vcpu_load(vcpu); 11138 kvm_mmu_unload(vcpu); 11139 vcpu_put(vcpu); 11140 } 11141 11142 static void kvm_free_vcpus(struct kvm *kvm) 11143 { 11144 unsigned int i; 11145 struct kvm_vcpu *vcpu; 11146 11147 /* 11148 * Unpin any mmu pages first. 11149 */ 11150 kvm_for_each_vcpu(i, vcpu, kvm) { 11151 kvm_clear_async_pf_completion_queue(vcpu); 11152 kvm_unload_vcpu_mmu(vcpu); 11153 } 11154 kvm_for_each_vcpu(i, vcpu, kvm) 11155 kvm_vcpu_destroy(vcpu); 11156 11157 mutex_lock(&kvm->lock); 11158 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) 11159 kvm->vcpus[i] = NULL; 11160 11161 atomic_set(&kvm->online_vcpus, 0); 11162 mutex_unlock(&kvm->lock); 11163 } 11164 11165 void kvm_arch_sync_events(struct kvm *kvm) 11166 { 11167 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); 11168 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); 11169 kvm_free_pit(kvm); 11170 } 11171 11172 #define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e)) 11173 11174 /** 11175 * __x86_set_memory_region: Setup KVM internal memory slot 11176 * 11177 * @kvm: the kvm pointer to the VM. 11178 * @id: the slot ID to setup. 11179 * @gpa: the GPA to install the slot (unused when @size == 0). 11180 * @size: the size of the slot. Set to zero to uninstall a slot. 11181 * 11182 * This function helps to setup a KVM internal memory slot. Specify 11183 * @size > 0 to install a new slot, while @size == 0 to uninstall a 11184 * slot. The return code can be one of the following: 11185 * 11186 * HVA: on success (uninstall will return a bogus HVA) 11187 * -errno: on error 11188 * 11189 * The caller should always use IS_ERR() to check the return value 11190 * before use. Note, the KVM internal memory slots are guaranteed to 11191 * remain valid and unchanged until the VM is destroyed, i.e., the 11192 * GPA->HVA translation will not change. However, the HVA is a user 11193 * address, i.e. its accessibility is not guaranteed, and must be 11194 * accessed via __copy_{to,from}_user(). 11195 */ 11196 void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, 11197 u32 size) 11198 { 11199 int i, r; 11200 unsigned long hva, old_npages; 11201 struct kvm_memslots *slots = kvm_memslots(kvm); 11202 struct kvm_memory_slot *slot; 11203 11204 /* Called with kvm->slots_lock held. 
*/ 11205 if (WARN_ON(id >= KVM_MEM_SLOTS_NUM)) 11206 return ERR_PTR_USR(-EINVAL); 11207 11208 slot = id_to_memslot(slots, id); 11209 if (size) { 11210 if (slot && slot->npages) 11211 return ERR_PTR_USR(-EEXIST); 11212 11213 /* 11214 * MAP_SHARED to prevent internal slot pages from being moved 11215 * by fork()/COW. 11216 */ 11217 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, 11218 MAP_SHARED | MAP_ANONYMOUS, 0); 11219 if (IS_ERR((void *)hva)) 11220 return (void __user *)hva; 11221 } else { 11222 if (!slot || !slot->npages) 11223 return NULL; 11224 11225 old_npages = slot->npages; 11226 hva = slot->userspace_addr; 11227 } 11228 11229 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 11230 struct kvm_userspace_memory_region m; 11231 11232 m.slot = id | (i << 16); 11233 m.flags = 0; 11234 m.guest_phys_addr = gpa; 11235 m.userspace_addr = hva; 11236 m.memory_size = size; 11237 r = __kvm_set_memory_region(kvm, &m); 11238 if (r < 0) 11239 return ERR_PTR_USR(r); 11240 } 11241 11242 if (!size) 11243 vm_munmap(hva, old_npages * PAGE_SIZE); 11244 11245 return (void __user *)hva; 11246 } 11247 EXPORT_SYMBOL_GPL(__x86_set_memory_region); 11248 11249 void kvm_arch_pre_destroy_vm(struct kvm *kvm) 11250 { 11251 kvm_mmu_pre_destroy_vm(kvm); 11252 } 11253 11254 void kvm_arch_destroy_vm(struct kvm *kvm) 11255 { 11256 if (current->mm == kvm->mm) { 11257 /* 11258 * Free memory regions allocated on behalf of userspace, 11259 * unless the the memory map has changed due to process exit 11260 * or fd copying. 11261 */ 11262 mutex_lock(&kvm->slots_lock); 11263 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 11264 0, 0); 11265 __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 11266 0, 0); 11267 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); 11268 mutex_unlock(&kvm->slots_lock); 11269 } 11270 static_call_cond(kvm_x86_vm_destroy)(kvm); 11271 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); 11272 kvm_pic_destroy(kvm); 11273 kvm_ioapic_destroy(kvm); 11274 kvm_free_vcpus(kvm); 11275 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 11276 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); 11277 kvm_mmu_uninit_vm(kvm); 11278 kvm_page_track_cleanup(kvm); 11279 kvm_xen_destroy_vm(kvm); 11280 kvm_hv_destroy_vm(kvm); 11281 } 11282 11283 static void memslot_rmap_free(struct kvm_memory_slot *slot) 11284 { 11285 int i; 11286 11287 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 11288 kvfree(slot->arch.rmap[i]); 11289 slot->arch.rmap[i] = NULL; 11290 } 11291 } 11292 11293 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) 11294 { 11295 int i; 11296 11297 memslot_rmap_free(slot); 11298 11299 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 11300 kvfree(slot->arch.lpage_info[i - 1]); 11301 slot->arch.lpage_info[i - 1] = NULL; 11302 } 11303 11304 kvm_page_track_free_memslot(slot); 11305 } 11306 11307 static int memslot_rmap_alloc(struct kvm_memory_slot *slot, 11308 unsigned long npages) 11309 { 11310 const int sz = sizeof(*slot->arch.rmap[0]); 11311 int i; 11312 11313 for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { 11314 int level = i + 1; 11315 int lpages = gfn_to_index(slot->base_gfn + npages - 1, 11316 slot->base_gfn, level) + 1; 11317 11318 WARN_ON(slot->arch.rmap[i]); 11319 11320 slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT); 11321 if (!slot->arch.rmap[i]) { 11322 memslot_rmap_free(slot); 11323 return -ENOMEM; 11324 } 11325 } 11326 11327 return 0; 11328 } 11329 11330 int alloc_all_memslots_rmaps(struct kvm 
*kvm) 11331 { 11332 struct kvm_memslots *slots; 11333 struct kvm_memory_slot *slot; 11334 int r, i; 11335 11336 /* 11337 * Check if memslots alreday have rmaps early before acquiring 11338 * the slots_arch_lock below. 11339 */ 11340 if (kvm_memslots_have_rmaps(kvm)) 11341 return 0; 11342 11343 mutex_lock(&kvm->slots_arch_lock); 11344 11345 /* 11346 * Read memslots_have_rmaps again, under the slots arch lock, 11347 * before allocating the rmaps 11348 */ 11349 if (kvm_memslots_have_rmaps(kvm)) { 11350 mutex_unlock(&kvm->slots_arch_lock); 11351 return 0; 11352 } 11353 11354 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 11355 slots = __kvm_memslots(kvm, i); 11356 kvm_for_each_memslot(slot, slots) { 11357 r = memslot_rmap_alloc(slot, slot->npages); 11358 if (r) { 11359 mutex_unlock(&kvm->slots_arch_lock); 11360 return r; 11361 } 11362 } 11363 } 11364 11365 /* 11366 * Ensure that memslots_have_rmaps becomes true strictly after 11367 * all the rmap pointers are set. 11368 */ 11369 smp_store_release(&kvm->arch.memslots_have_rmaps, true); 11370 mutex_unlock(&kvm->slots_arch_lock); 11371 return 0; 11372 } 11373 11374 static int kvm_alloc_memslot_metadata(struct kvm *kvm, 11375 struct kvm_memory_slot *slot, 11376 unsigned long npages) 11377 { 11378 int i, r; 11379 11380 /* 11381 * Clear out the previous array pointers for the KVM_MR_MOVE case. The 11382 * old arrays will be freed by __kvm_set_memory_region() if installing 11383 * the new memslot is successful. 11384 */ 11385 memset(&slot->arch, 0, sizeof(slot->arch)); 11386 11387 if (kvm_memslots_have_rmaps(kvm)) { 11388 r = memslot_rmap_alloc(slot, npages); 11389 if (r) 11390 return r; 11391 } 11392 11393 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 11394 struct kvm_lpage_info *linfo; 11395 unsigned long ugfn; 11396 int lpages; 11397 int level = i + 1; 11398 11399 lpages = gfn_to_index(slot->base_gfn + npages - 1, 11400 slot->base_gfn, level) + 1; 11401 11402 linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT); 11403 if (!linfo) 11404 goto out_free; 11405 11406 slot->arch.lpage_info[i - 1] = linfo; 11407 11408 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) 11409 linfo[0].disallow_lpage = 1; 11410 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) 11411 linfo[lpages - 1].disallow_lpage = 1; 11412 ugfn = slot->userspace_addr >> PAGE_SHIFT; 11413 /* 11414 * If the gfn and userspace address are not aligned wrt each 11415 * other, disable large page support for this slot. 11416 */ 11417 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { 11418 unsigned long j; 11419 11420 for (j = 0; j < lpages; ++j) 11421 linfo[j].disallow_lpage = 1; 11422 } 11423 } 11424 11425 if (kvm_page_track_create_memslot(slot, npages)) 11426 goto out_free; 11427 11428 return 0; 11429 11430 out_free: 11431 memslot_rmap_free(slot); 11432 11433 for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) { 11434 kvfree(slot->arch.lpage_info[i - 1]); 11435 slot->arch.lpage_info[i - 1] = NULL; 11436 } 11437 return -ENOMEM; 11438 } 11439 11440 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) 11441 { 11442 struct kvm_vcpu *vcpu; 11443 int i; 11444 11445 /* 11446 * memslots->generation has been incremented. 11447 * mmio generation may have reached its maximum value. 
11448 */ 11449 kvm_mmu_invalidate_mmio_sptes(kvm, gen); 11450 11451 /* Force re-initialization of steal_time cache */ 11452 kvm_for_each_vcpu(i, vcpu, kvm) 11453 kvm_vcpu_kick(vcpu); 11454 } 11455 11456 int kvm_arch_prepare_memory_region(struct kvm *kvm, 11457 struct kvm_memory_slot *memslot, 11458 const struct kvm_userspace_memory_region *mem, 11459 enum kvm_mr_change change) 11460 { 11461 if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) 11462 return kvm_alloc_memslot_metadata(kvm, memslot, 11463 mem->memory_size >> PAGE_SHIFT); 11464 return 0; 11465 } 11466 11467 11468 static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable) 11469 { 11470 struct kvm_arch *ka = &kvm->arch; 11471 11472 if (!kvm_x86_ops.cpu_dirty_log_size) 11473 return; 11474 11475 if ((enable && ++ka->cpu_dirty_logging_count == 1) || 11476 (!enable && --ka->cpu_dirty_logging_count == 0)) 11477 kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING); 11478 11479 WARN_ON_ONCE(ka->cpu_dirty_logging_count < 0); 11480 } 11481 11482 static void kvm_mmu_slot_apply_flags(struct kvm *kvm, 11483 struct kvm_memory_slot *old, 11484 struct kvm_memory_slot *new, 11485 enum kvm_mr_change change) 11486 { 11487 bool log_dirty_pages = new->flags & KVM_MEM_LOG_DIRTY_PAGES; 11488 11489 /* 11490 * Update CPU dirty logging if dirty logging is being toggled. This 11491 * applies to all operations. 11492 */ 11493 if ((old->flags ^ new->flags) & KVM_MEM_LOG_DIRTY_PAGES) 11494 kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages); 11495 11496 /* 11497 * Nothing more to do for RO slots (which can't be dirtied and can't be 11498 * made writable) or CREATE/MOVE/DELETE of a slot. 11499 * 11500 * For a memslot with dirty logging disabled: 11501 * CREATE: No dirty mappings will already exist. 11502 * MOVE/DELETE: The old mappings will already have been cleaned up by 11503 * kvm_arch_flush_shadow_memslot() 11504 * 11505 * For a memslot with dirty logging enabled: 11506 * CREATE: No shadow pages exist, thus nothing to write-protect 11507 * and no dirty bits to clear. 11508 * MOVE/DELETE: The old mappings will already have been cleaned up by 11509 * kvm_arch_flush_shadow_memslot(). 11510 */ 11511 if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY)) 11512 return; 11513 11514 /* 11515 * READONLY and non-flags changes were filtered out above, and the only 11516 * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty 11517 * logging isn't being toggled on or off. 11518 */ 11519 if (WARN_ON_ONCE(!((old->flags ^ new->flags) & KVM_MEM_LOG_DIRTY_PAGES))) 11520 return; 11521 11522 if (!log_dirty_pages) { 11523 /* 11524 * Dirty logging tracks sptes in 4k granularity, meaning that 11525 * large sptes have to be split. If live migration succeeds, 11526 * the guest in the source machine will be destroyed and large 11527 * sptes will be created in the destination. However, if the 11528 * guest continues to run in the source machine (for example if 11529 * live migration fails), small sptes will remain around and 11530 * cause bad performance. 11531 * 11532 * Scan sptes if dirty logging has been stopped, dropping those 11533 * which can be collapsed into a single large-page spte. Later 11534 * page faults will create the large-page sptes. 11535 */ 11536 kvm_mmu_zap_collapsible_sptes(kvm, new); 11537 } else { 11538 /* 11539 * Initially-all-set does not require write protecting any page, 11540 * because they're all assumed to be dirty. 
11541 */ 11542 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) 11543 return; 11544 11545 if (kvm_x86_ops.cpu_dirty_log_size) { 11546 kvm_mmu_slot_leaf_clear_dirty(kvm, new); 11547 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M); 11548 } else { 11549 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K); 11550 } 11551 } 11552 } 11553 11554 void kvm_arch_commit_memory_region(struct kvm *kvm, 11555 const struct kvm_userspace_memory_region *mem, 11556 struct kvm_memory_slot *old, 11557 const struct kvm_memory_slot *new, 11558 enum kvm_mr_change change) 11559 { 11560 if (!kvm->arch.n_requested_mmu_pages) 11561 kvm_mmu_change_mmu_pages(kvm, 11562 kvm_mmu_calculate_default_mmu_pages(kvm)); 11563 11564 /* 11565 * FIXME: const-ify all uses of struct kvm_memory_slot. 11566 */ 11567 kvm_mmu_slot_apply_flags(kvm, old, (struct kvm_memory_slot *) new, change); 11568 11569 /* Free the arrays associated with the old memslot. */ 11570 if (change == KVM_MR_MOVE) 11571 kvm_arch_free_memslot(kvm, old); 11572 } 11573 11574 void kvm_arch_flush_shadow_all(struct kvm *kvm) 11575 { 11576 kvm_mmu_zap_all(kvm); 11577 } 11578 11579 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 11580 struct kvm_memory_slot *slot) 11581 { 11582 kvm_page_track_flush_slot(kvm, slot); 11583 } 11584 11585 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) 11586 { 11587 return (is_guest_mode(vcpu) && 11588 kvm_x86_ops.guest_apic_has_interrupt && 11589 static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); 11590 } 11591 11592 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) 11593 { 11594 if (!list_empty_careful(&vcpu->async_pf.done)) 11595 return true; 11596 11597 if (kvm_apic_has_events(vcpu)) 11598 return true; 11599 11600 if (vcpu->arch.pv.pv_unhalted) 11601 return true; 11602 11603 if (vcpu->arch.exception.pending) 11604 return true; 11605 11606 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 11607 (vcpu->arch.nmi_pending && 11608 static_call(kvm_x86_nmi_allowed)(vcpu, false))) 11609 return true; 11610 11611 if (kvm_test_request(KVM_REQ_SMI, vcpu) || 11612 (vcpu->arch.smi_pending && 11613 static_call(kvm_x86_smi_allowed)(vcpu, false))) 11614 return true; 11615 11616 if (kvm_arch_interrupt_allowed(vcpu) && 11617 (kvm_cpu_has_interrupt(vcpu) || 11618 kvm_guest_apic_has_interrupt(vcpu))) 11619 return true; 11620 11621 if (kvm_hv_has_stimer_pending(vcpu)) 11622 return true; 11623 11624 if (is_guest_mode(vcpu) && 11625 kvm_x86_ops.nested_ops->hv_timer_pending && 11626 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) 11627 return true; 11628 11629 return false; 11630 } 11631 11632 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 11633 { 11634 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); 11635 } 11636 11637 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 11638 { 11639 if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) 11640 return true; 11641 11642 return false; 11643 } 11644 11645 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) 11646 { 11647 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) 11648 return true; 11649 11650 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 11651 kvm_test_request(KVM_REQ_SMI, vcpu) || 11652 kvm_test_request(KVM_REQ_EVENT, vcpu)) 11653 return true; 11654 11655 return kvm_arch_dy_has_pending_interrupt(vcpu); 11656 } 11657 11658 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) 11659 { 11660 if (vcpu->arch.guest_state_protected) 11661 return true; 11662 11663 return vcpu->arch.preempted_in_kernel; 11664 } 11665 
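/* A kick (an IPI that forces a VM-Exit) is needed only if the target vCPU is still in guest mode; kvm_vcpu_exiting_guest_mode() transitions vcpu->mode to EXITING_GUEST_MODE, so a vCPU that is already on its way out is not kicked again. */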
11666 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 11667 { 11668 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; 11669 } 11670 11671 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) 11672 { 11673 return static_call(kvm_x86_interrupt_allowed)(vcpu, false); 11674 } 11675 11676 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) 11677 { 11678 /* Can't read the RIP when guest state is protected, just return 0 */ 11679 if (vcpu->arch.guest_state_protected) 11680 return 0; 11681 11682 if (is_64_bit_mode(vcpu)) 11683 return kvm_rip_read(vcpu); 11684 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + 11685 kvm_rip_read(vcpu)); 11686 } 11687 EXPORT_SYMBOL_GPL(kvm_get_linear_rip); 11688 11689 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) 11690 { 11691 return kvm_get_linear_rip(vcpu) == linear_rip; 11692 } 11693 EXPORT_SYMBOL_GPL(kvm_is_linear_rip); 11694 11695 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) 11696 { 11697 unsigned long rflags; 11698 11699 rflags = static_call(kvm_x86_get_rflags)(vcpu); 11700 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 11701 rflags &= ~X86_EFLAGS_TF; 11702 return rflags; 11703 } 11704 EXPORT_SYMBOL_GPL(kvm_get_rflags); 11705 11706 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 11707 { 11708 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && 11709 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) 11710 rflags |= X86_EFLAGS_TF; 11711 static_call(kvm_x86_set_rflags)(vcpu, rflags); 11712 } 11713 11714 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 11715 { 11716 __kvm_set_rflags(vcpu, rflags); 11717 kvm_make_request(KVM_REQ_EVENT, vcpu); 11718 } 11719 EXPORT_SYMBOL_GPL(kvm_set_rflags); 11720 11721 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) 11722 { 11723 int r; 11724 11725 if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) || 11726 work->wakeup_all) 11727 return; 11728 11729 r = kvm_mmu_reload(vcpu); 11730 if (unlikely(r)) 11731 return; 11732 11733 if (!vcpu->arch.mmu->direct_map && 11734 work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu)) 11735 return; 11736 11737 kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true); 11738 } 11739 11740 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) 11741 { 11742 BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU)); 11743 11744 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU)); 11745 } 11746 11747 static inline u32 kvm_async_pf_next_probe(u32 key) 11748 { 11749 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); 11750 } 11751 11752 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 11753 { 11754 u32 key = kvm_async_pf_hash_fn(gfn); 11755 11756 while (vcpu->arch.apf.gfns[key] != ~0) 11757 key = kvm_async_pf_next_probe(key); 11758 11759 vcpu->arch.apf.gfns[key] = gfn; 11760 } 11761 11762 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) 11763 { 11764 int i; 11765 u32 key = kvm_async_pf_hash_fn(gfn); 11766 11767 for (i = 0; i < ASYNC_PF_PER_VCPU && 11768 (vcpu->arch.apf.gfns[key] != gfn && 11769 vcpu->arch.apf.gfns[key] != ~0); i++) 11770 key = kvm_async_pf_next_probe(key); 11771 11772 return key; 11773 } 11774 11775 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 11776 { 11777 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; 11778 } 11779 11780 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) 11781 { 11782 u32 i, j, k; 11783 11784 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); 11785 11786 if 

static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);

	if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
		return;

	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;
			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}

static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
{
	u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
				      sizeof(reason));
}

static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
{
	unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);

	return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
					     &token, offset, sizeof(token));
}

static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu)
{
	unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
	u32 val;

	if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
					 &val, offset, sizeof(val)))
		return false;

	return !val;
}

static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
		return false;

	if (!kvm_pv_async_pf_enabled(vcpu) ||
	    (vcpu->arch.apf.send_user_only && static_call(kvm_x86_get_cpl)(vcpu) == 0))
		return false;

	return true;
}

bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
	if (unlikely(!lapic_in_kernel(vcpu) ||
		     kvm_event_needs_reinjection(vcpu) ||
		     vcpu->arch.exception.pending))
		return false;

	if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
		return false;

	/*
	 * If interrupts are off we cannot even use an artificial
	 * halt state.
	 */
	return kvm_arch_interrupt_allowed(vcpu);
}
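
/*
 * Rough shape of the paravirtual async-PF flow implemented below: when a host
 * page is not yet available, the gfn is remembered in the per-vCPU hash
 * (kvm_add_async_pf_gfn()), KVM_PV_REASON_PAGE_NOT_PRESENT is written to the
 * shared kvm_vcpu_pv_apf_data area and a #PF whose address is the token is
 * injected (kvm_arch_async_page_not_present()).  Once the page is ready, the
 * token is written back and the guest-configured "page ready" vector
 * (vcpu->arch.apf.vec) is raised (kvm_arch_async_page_present()).  If the
 * paravirtual path is unavailable, the vCPU is instead put into an artificial
 * halt state via KVM_REQ_APF_HALT.
 */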

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);

	if (kvm_can_deliver_async_pf(vcpu) &&
	    !apf_put_user_notpresent(vcpu)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		fault.async_page_fault = true;
		kvm_inject_page_fault(vcpu, &fault);
		return true;
	} else {
		/*
		 * It is not possible to deliver a paravirtualized asynchronous
		 * page fault, but putting the guest in an artificial halt state
		 * can be beneficial nevertheless: if an interrupt arrives, we
		 * can deliver it timely and perhaps the guest will schedule
		 * another process.  When the instruction that triggered a page
		 * fault is retried, hopefully the page will be ready in the host.
		 */
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
		return false;
	}
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = vcpu->arch.apf.vec
	};

	if (work->wakeup_all)
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);

	if ((work->wakeup_all || work->notpresent_injected) &&
	    kvm_pv_async_pf_enabled(vcpu) &&
	    !apf_put_user_ready(vcpu, work->arch.token)) {
		vcpu->arch.apf.pageready_pending = true;
		kvm_apic_set_irq(vcpu, &irq, NULL);
	}

	vcpu->arch.apf.halted = false;
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}

void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_APF_READY, vcpu);
	if (!vcpu->arch.apf.pageready_pending)
		kvm_vcpu_kick(vcpu);
}

bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!kvm_pv_async_pf_enabled(vcpu))
		return true;
	else
		return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
}

void kvm_arch_start_assignment(struct kvm *kvm)
{
	if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
		static_call_cond(kvm_x86_start_assignment)(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);

void kvm_arch_end_assignment(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);

bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);

void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);

bool kvm_arch_has_irq_bypass(void)
{
	return true;
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	int ret;

	irqfd->producer = prod;
	kvm_arch_start_assignment(irqfd->kvm);
	ret = static_call(kvm_x86_update_pi_irte)(irqfd->kvm,
					   prod->irq, irqfd->gsi, 1);

	if (ret)
		kvm_arch_end_assignment(irqfd->kvm);

	return ret;
}
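
/*
 * IRQ bypass, roughly: registering a producer above ties the host interrupt
 * (prod->irq) to the guest GSI via kvm_x86_update_pi_irte(..., 1), i.e. the
 * vendor code switches the IRTE to posted/guest delivery, and the VM is
 * counted as having an assigned device.  Unregistering below does the inverse
 * with kvm_x86_update_pi_irte(..., 0), falling back to remapped delivery.
 */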

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	int ret;
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	WARN_ON(irqfd->producer != prod);
	irqfd->producer = NULL;

	/*
	 * When the producer of a consumer is unregistered, we change back to
	 * remapped mode, so we can re-use the current implementation
	 * when the irq is masked/disabled or the consumer side (KVM
	 * in this case) doesn't want to receive the interrupts.
	 */
	ret = static_call(kvm_x86_update_pi_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0);
	if (ret)
		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
		       " fails: %d\n", irqfd->consumer.token, ret);

	kvm_arch_end_assignment(irqfd->kvm);
}

int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set)
{
	return static_call(kvm_x86_update_pi_irte)(kvm, host_irq, guest_irq, set);
}

bool kvm_vector_hashing_enabled(void)
{
	return vector_hashing;
}

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);

int kvm_spec_ctrl_test_value(u64 value)
{
	/*
	 * Test that setting IA32_SPEC_CTRL to the given value
	 * is allowed by the host processor.
	 */

	u64 saved_value;
	unsigned long flags;
	int ret = 0;

	local_irq_save(flags);

	if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
		ret = 1;
	else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
		ret = 1;
	else
		wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);

	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);

void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
	struct x86_exception fault;
	u32 access = error_code &
		(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);

	if (!(error_code & PFERR_PRESENT_MASK) ||
	    vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) {
		/*
		 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
		 * tables probably do not match the TLB.  Just proceed
		 * with the error code that the processor gave.
		 */
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = error_code;
		fault.nested_page_fault = false;
		fault.address = gva;
	}
	vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
}
EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
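
/*
 * Note on the condition above: the walker-provided fault information is used
 * only when the error code reports a present page *and* the software walk
 * also fails (gva_to_gpa() == UNMAPPED_GVA), in which case 'fault' was filled
 * in during the failed walk.  Otherwise, i.e. for a not-present fault or when
 * the walk succeeds (stale TLB), a #PF is synthesized carrying the error code
 * the processor originally reported.
 */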

/*
 * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
 * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
 * indicates whether exit to userspace is needed.
 */
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e)
{
	if (r == X86EMUL_PROPAGATE_FAULT) {
		kvm_inject_emulated_page_fault(vcpu, e);
		return 1;
	}

	/*
	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
	 * while handling a VMX instruction, KVM could've handled the request
	 * correctly by exiting to userspace and performing I/O, but there
	 * doesn't seem to be a real use-case behind such requests; just return
	 * KVM_EXIT_INTERNAL_ERROR for now.
	 */
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
	vcpu->run->internal.ndata = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);

int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
{
	bool pcid_enabled;
	struct x86_exception e;
	struct {
		u64 pcid;
		u64 gla;
	} operand;
	int r;

	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

	if (operand.pcid >> 12 != 0) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);

	switch (type) {
	case INVPCID_TYPE_INDIV_ADDR:
		if ((!pcid_enabled && (operand.pcid != 0)) ||
		    is_noncanonical_address(operand.gla, vcpu)) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}
		kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
		return kvm_skip_emulated_instruction(vcpu);

	case INVPCID_TYPE_SINGLE_CTXT:
		if (!pcid_enabled && (operand.pcid != 0)) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}

		kvm_invalidate_pcid(vcpu, operand.pcid);
		return kvm_skip_emulated_instruction(vcpu);

	case INVPCID_TYPE_ALL_NON_GLOBAL:
		/*
		 * Currently, KVM doesn't mark global entries in the shadow
		 * page tables, so a non-global flush just degenerates to a
		 * global flush. If needed, we could optimize this later by
		 * keeping track of global entries in shadow page tables.
		 */

		fallthrough;
	case INVPCID_TYPE_ALL_INCL_GLOBAL:
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return kvm_skip_emulated_instruction(vcpu);

	default:
		BUG(); /* We have already checked above that type <= 3 */
	}
}
EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
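
/*
 * The helpers below back MMIO and string PIO emulation for SEV-ES guests,
 * where KVM cannot read guest memory or registers directly; the access is
 * described by the guest through the GHCB and forwarded here from the SVM
 * VMGEXIT handling, ending up in kvm_sev_es_mmio_read/write() and
 * kvm_sev_es_string_io().  MMIO that cannot be handled in the kernel is
 * bounced to userspace in fragments of at most 8 bytes, chained through
 * complete_sev_es_emulated_mmio() via vcpu->arch.complete_userspace_io.
 */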

static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_mmio_fragment *frag;
	unsigned int len;

	BUG_ON(!vcpu->mmio_needed);

	/* Complete previous fragment */
	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	len = min(8u, frag->len);
	if (!vcpu->mmio_is_write)
		memcpy(frag->data, run->mmio.data, len);

	if (frag->len <= 8) {
		/* Switch to the next fragment. */
		frag++;
		vcpu->mmio_cur_fragment++;
	} else {
		/* Go forward to the next mmio piece. */
		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
	}

	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
		vcpu->mmio_needed = 0;

		// VMG change, at this point, we're always done
		// RIP has already been advanced
		return 1;
	}

	// More MMIO is needed
	run->mmio.phys_addr = frag->gpa;
	run->mmio.len = min(8u, frag->len);
	run->mmio.is_write = vcpu->mmio_is_write;
	if (run->mmio.is_write)
		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
	run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
			  void *data)
{
	int handled;
	struct kvm_mmio_fragment *frag;

	if (!data)
		return -EINVAL;

	handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
	if (handled == bytes)
		return 1;

	bytes -= handled;
	gpa += handled;
	data += handled;

	/* TODO: Check if we need to increment the number of frags */
	frag = vcpu->mmio_fragments;
	vcpu->mmio_nr_fragments = 1;
	frag->len = bytes;
	frag->gpa = gpa;
	frag->data = data;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.phys_addr = gpa;
	vcpu->run->mmio.len = min(8u, frag->len);
	vcpu->run->mmio.is_write = 1;
	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
	vcpu->run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write);

int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
			 void *data)
{
	int handled;
	struct kvm_mmio_fragment *frag;

	if (!data)
		return -EINVAL;

	handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
	if (handled == bytes)
		return 1;

	bytes -= handled;
	gpa += handled;
	data += handled;

	/* TODO: Check if we need to increment the number of frags */
	frag = vcpu->mmio_fragments;
	vcpu->mmio_nr_fragments = 1;
	frag->len = bytes;
	frag->gpa = gpa;
	frag->data = data;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.phys_addr = gpa;
	vcpu->run->mmio.len = min(8u, frag->len);
	vcpu->run->mmio.is_write = 0;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);

static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
{
	memcpy(vcpu->arch.guest_ins_data, vcpu->arch.pio_data,
	       vcpu->arch.pio.count * vcpu->arch.pio.size);
	vcpu->arch.pio.count = 0;

	return 1;
}

static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
			   unsigned int port, void *data, unsigned int count)
{
	int ret;

	ret = emulator_pio_out_emulated(vcpu->arch.emulate_ctxt, size, port,
					data, count);
	if (ret)
		return ret;

	vcpu->arch.pio.count = 0;

	return 0;
}
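
/*
 * OUTS vs. INS asymmetry: for OUTS the bytes were already copied into the
 * in-kernel PIO buffer by emulator_pio_out_emulated(), so nothing is left to
 * do once the I/O completes and pio.count can be cleared right away.  For INS
 * (below), when the I/O has to go out to userspace, the data only exists once
 * userspace has filled the PIO buffer, so the destination is stashed in
 * guest_ins_data and the copy is deferred to complete_sev_es_emulated_ins().
 */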

static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
			  unsigned int port, void *data, unsigned int count)
{
	int ret;

	ret = emulator_pio_in_emulated(vcpu->arch.emulate_ctxt, size, port,
				       data, count);
	if (ret) {
		vcpu->arch.pio.count = 0;
	} else {
		vcpu->arch.guest_ins_data = data;
		vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
	}

	return 0;
}

int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in)
{
	return in ? kvm_sev_es_ins(vcpu, size, port, data, count)
		  : kvm_sev_es_outs(vcpu, size, port, data, count);
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);