// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/mm.h>
#include <linux/objtool.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/trace_events.h>
#include <linux/entry-kvm.h>

#include <asm/apic.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/idtentry.h>
#include <asm/io.h>
#include <asm/irq_remapping.h>
#include <asm/kexec.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/mshyperv.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
#include <asm/virtext.h>
#include <asm/vmx.h>

#include "capabilities.h"
#include "cpuid.h"
#include "evmcs.h"
#include "hyperv.h"
#include "kvm_onhyperv.h"
#include "irq.h"
#include "kvm_cache_regs.h"
#include "lapic.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "sgx.h"
#include "trace.h"
#include "vmcs.h"
#include "vmcs12.h"
#include "vmx.h"
#include "x86.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
#endif

bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly enable_vnmi = 1;
module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);

bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);

module_param(enable_apicv, bool, S_IRUGO);

/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and be a hypervisor for its own guests. If nested=0, guests may not
 * use VMX instructions.
 */
static bool __read_mostly nested = 1;
module_param(nested, bool, S_IRUGO);

bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);

static bool __read_mostly dump_invalid_vmcs = 0;
module_param(dump_invalid_vmcs, bool, 0644);

#define MSR_BITMAP_MODE_X2APIC		1
#define MSR_BITMAP_MODE_X2APIC_APICV	2

#define KVM_VMX_TSC_MULTIPLIER_MAX	0xffffffffffffffffULL

/* Guest_tsc -> host_tsc conversion requires 64-bit division. */
static int __read_mostly cpu_preemption_timer_multi;
static bool __read_mostly enable_preemption_timer = 1;
#ifdef CONFIG_X86_64
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif

extern bool __read_mostly allow_smaller_maxphyaddr;
module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);

#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON				\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)

#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
	RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
	RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
	RTIT_STATUS_BYTECNT))

/*
 * List of MSRs that can be directly passed to the guest.
 * In addition to these, the x2APIC and PT MSRs are handled specially.
 */
static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
	MSR_IA32_SPEC_CTRL,
	MSR_IA32_PRED_CMD,
	MSR_IA32_TSC,
#ifdef CONFIG_X86_64
	MSR_FS_BASE,
	MSR_GS_BASE,
	MSR_KERNEL_GS_BASE,
	MSR_IA32_XFD,
	MSR_IA32_XFD_ERR,
#endif
	MSR_IA32_SYSENTER_CS,
	MSR_IA32_SYSENTER_ESP,
	MSR_IA32_SYSENTER_EIP,
	MSR_CORE_C1_RES,
	MSR_CORE_C3_RESIDENCY,
	MSR_CORE_C6_RESIDENCY,
	MSR_CORE_C7_RESIDENCY,
};

/*
 * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled. According to test, this time is usually smaller than
 *             128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held for
 *             less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC,
 * refer to SDM volume 3b section 21.6.13 & 22.1.3.
 */
static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
module_param(ple_gap, uint, 0444);

static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, uint, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(ple_window_grow, uint, 0444);

/* Default resets per-vcpu window every exit to ple_window. */
static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(ple_window_shrink, uint, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);

/* Default is SYSTEM mode, 1 for host-guest mode */
int __read_mostly pt_mode = PT_MODE_SYSTEM;
module_param(pt_mode, int, S_IRUGO);

static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
static DEFINE_MUTEX(vmx_l1d_flush_mutex);

/* Storage for pre module init parameter parsing */
static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;

static const struct {
	const char *option;
	bool for_parse;
} vmentry_l1d_param[] = {
	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};

#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;

/* Control for disabling CPU Fill buffer clear */
static bool __read_mostly vmx_fb_clear_ctrl_available;

static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
	struct page *page;
	unsigned int i;

	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
		return 0;
	}

	if (!enable_ept) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
		return 0;
	}

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
		u64 msr;

		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
		if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
			l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
			return 0;
		}
	}

	/* If set to auto use the default l1tf mitigation method */
	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
		switch (l1tf_mitigation) {
		case L1TF_MITIGATION_OFF:
			l1tf = VMENTER_L1D_FLUSH_NEVER;
			break;
		case L1TF_MITIGATION_FLUSH_NOWARN:
		case L1TF_MITIGATION_FLUSH:
		case L1TF_MITIGATION_FLUSH_NOSMT:
			l1tf = VMENTER_L1D_FLUSH_COND;
			break;
		case L1TF_MITIGATION_FULL:
		case L1TF_MITIGATION_FULL_FORCE:
			l1tf = VMENTER_L1D_FLUSH_ALWAYS;
			break;
		}
	} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
		l1tf = VMENTER_L1D_FLUSH_ALWAYS;
	}

	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
		/*
		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
		 * lifetime and so should not be charged to a memcg.
		 */
		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
		if (!page)
			return -ENOMEM;
		vmx_l1d_flush_pages = page_address(page);

		/*
		 * Initialize each page with a different pattern in
		 * order to protect against KSM in the nested
		 * virtualization case.
		 */
		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
			       PAGE_SIZE);
		}
	}

	l1tf_vmx_mitigation = l1tf;

	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
		static_branch_enable(&vmx_l1d_should_flush);
	else
		static_branch_disable(&vmx_l1d_should_flush);

	if (l1tf == VMENTER_L1D_FLUSH_COND)
		static_branch_enable(&vmx_l1d_flush_cond);
	else
		static_branch_disable(&vmx_l1d_flush_cond);
	return 0;
}

static int vmentry_l1d_flush_parse(const char *s)
{
	unsigned int i;

	if (s) {
		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
			if (vmentry_l1d_param[i].for_parse &&
			    sysfs_streq(s, vmentry_l1d_param[i].option))
				return i;
		}
	}
	return -EINVAL;
}

static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
{
	int l1tf, ret;

	l1tf = vmentry_l1d_flush_parse(s);
	if (l1tf < 0)
		return l1tf;

	if (!boot_cpu_has(X86_BUG_L1TF))
		return 0;

	/*
	 * Has vmx_init() run already? If not then this is the pre init
	 * parameter parsing. In that case just store the value and let
	 * vmx_init() do the proper setup after enable_ept has been
	 * established.
	 */
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
		vmentry_l1d_flush_param = l1tf;
		return 0;
	}

	mutex_lock(&vmx_l1d_flush_mutex);
	ret = vmx_setup_l1d_flush(l1tf);
	mutex_unlock(&vmx_l1d_flush_mutex);
	return ret;
}

static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
{
	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
		return sprintf(s, "???\n");

	return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
}

static void vmx_setup_fb_clear_ctrl(void)
{
	u64 msr;

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) &&
	    !boot_cpu_has_bug(X86_BUG_MDS) &&
	    !boot_cpu_has_bug(X86_BUG_TAA)) {
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
		if (msr & ARCH_CAP_FB_CLEAR_CTRL)
			vmx_fb_clear_ctrl_available = true;
	}
}

static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
{
	u64 msr;

	if (!vmx->disable_fb_clear)
		return;

	msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
	msr |= FB_CLEAR_DIS;
	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
	/* Cache the MSR value to avoid reading it later */
	vmx->msr_ia32_mcu_opt_ctrl = msr;
}

static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
{
	if (!vmx->disable_fb_clear)
		return;

	vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
}

static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
	vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;

	/*
	 * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
	 * at VMEntry. Skip the MSR read/write when a guest has no use case to
	 * execute VERW.
	 */
	if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
	    ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
	     (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
	     (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
	     (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
	     (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
		vmx->disable_fb_clear = false;
}

static const struct kernel_param_ops vmentry_l1d_flush_ops = {
	.set = vmentry_l1d_flush_set,
	.get = vmentry_l1d_flush_get,
};
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);

static u32 vmx_segment_access_rights(struct kvm_segment *var);

void vmx_vmexit(void);

#define vmx_insn_failed(fmt...)		\
do {					\
	WARN_ONCE(1, fmt);		\
	pr_warn_ratelimited(fmt);	\
} while (0)

asmlinkage void vmread_error(unsigned long field, bool fault)
{
	if (fault)
		kvm_spurious_fault();
	else
		vmx_insn_failed("kvm: vmread failed: field=%lx\n", field);
}

noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",
			field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
}

noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr);
}

noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr);
}

noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
{
	vmx_insn_failed("kvm: invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
			ext, vpid, gva);
}

noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
{
	vmx_insn_failed("kvm: invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n",
			ext, eptp, gpa);
}

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCSs loaded on that CPU. This is
 * needed when a CPU is brought down, and we need to VMCLEAR all VMCSs
 * loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

struct vmcs_config vmcs_config;
struct vmx_capability vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

static unsigned long host_idt_base;

#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);

static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
	struct hv_enlightened_vmcs *evmcs;
	struct hv_partition_assist_pg **p_hv_pa_pg =
			&to_kvm_hv(vcpu->kvm)->hv_pa_pg;
	/*
	 * Synthetic VM-Exit is not enabled in current code and so all
	 * evmcs in a single VM share the same assist page.
	 */
	if (!*p_hv_pa_pg)
		*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);

	if (!*p_hv_pa_pg)
		return -ENOMEM;

	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;

	evmcs->partition_assist_page = __pa(*p_hv_pa_pg);
	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;

	return 0;
}

#endif /* IS_ENABLED(CONFIG_HYPERV) */

/*
 * Comment's format: document - errata name - stepping - processor name.
 * Taken from
 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
 */
static u32 vmx_preemption_cpu_tfms[] = {
	/* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
	0x000206E6,
	/* 323056.pdf - AAX65  - C2 - Xeon L3406 */
	/* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
	/* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
	0x00020652,
	/* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
	0x00020655,
	/* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
	/* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
	/*
	 * 320767.pdf - AAP86  - B1 -
	 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
	 */
	0x000106E5,
	/* 321333.pdf - AAM126 - C0 - Xeon 3500 */
	0x000106A0,
	/* 321333.pdf - AAM126 - C1 - Xeon 3500 */
	0x000106A1,
	/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
	0x000106A4,
	/* 321333.pdf - AAM126 - D0 - Xeon 3500 */
	/* 321324.pdf - AAK139 - D0 - Xeon 5500 */
	/* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
	0x000106A5,
	/* Xeon E3-1220 V2 */
	0x000306A8,
};

static inline bool cpu_has_broken_vmx_preemption_timer(void)
{
	u32 eax = cpuid_eax(0x00000001), i;

	/* Clear the reserved bits */
	eax &= ~(0x3U << 14 | 0xfU << 28);
	for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
		if (eax == vmx_preemption_cpu_tfms[i])
			return true;

	return false;
}

static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
{
	return flexpriority_enabled && lapic_in_kernel(vcpu);
}

static int possible_passthrough_msr_slot(u32 msr)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
		if (vmx_possible_passthrough_msrs[i] == msr)
			return i;

	return -ENOENT;
}

static bool is_valid_passthrough_msr(u32 msr)
{
	bool r;

	switch (msr) {
	case 0x800 ... 0x8ff:
		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
		return true;
	case MSR_IA32_RTIT_STATUS:
	case MSR_IA32_RTIT_OUTPUT_BASE:
	case MSR_IA32_RTIT_OUTPUT_MASK:
	case MSR_IA32_RTIT_CR3_MATCH:
	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
		/* PT MSRs. These are handled in pt_update_intercept_for_msr() */
	case MSR_LBR_SELECT:
	case MSR_LBR_TOS:
	case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31:
	case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31:
	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
		return true;
	}

	r = possible_passthrough_msr_slot(msr) != -ENOENT;

	WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);

	return r;
}

struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = kvm_find_user_return_msr(msr);
	if (i >= 0)
		return &vmx->guest_uret_msrs[i];
	return NULL;
}

static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
				  struct vmx_uret_msr *msr, u64 data)
{
	unsigned int slot = msr - vmx->guest_uret_msrs;
	int ret = 0;

	if (msr->load_into_hardware) {
		preempt_disable();
		ret = kvm_set_user_return_msr(slot, data, msr->mask);
		preempt_enable();
	}
	if (!ret)
		msr->data = data;
	return ret;
}

#ifdef CONFIG_KEXEC_CORE
static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#endif /* CONFIG_KEXEC_CORE */

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;

	vmcs_clear(loaded_vmcs->vmcs);
	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
		vmcs_clear(loaded_vmcs->shadow_vmcs);

	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * Ensure all writes to loaded_vmcs, including deleting it from its
	 * current percpu list, complete before setting loaded_vmcs->cpu to
	 * -1, otherwise a different cpu can see loaded_vmcs->cpu == -1 first
	 * and add loaded_vmcs to its percpu list before it's deleted from this
	 * cpu's list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
	 */
	smp_wmb();

	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}

void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}

static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
{
	bool ret;
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);

	if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
		kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
		vmx->segment_cache.bitmask = 0;
	}
	ret = vmx->segment_cache.bitmask & mask;
	vmx->segment_cache.bitmask |= mask;
	return ret;
}

static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}

static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
	ulong *p = &vmx->segment_cache.seg[seg].base;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
	return *p;
}

static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].limit;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
	return *p;
}

static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].ar;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
	return *p;
}

void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
	/*
	 * Guest access to VMware backdoor ports could legitimately
	 * trigger #GP because of TSS I/O permission bitmap.
	 * We intercept those #GP and allow access to them anyway
	 * as VMware does.
	 */
	if (enable_vmware_backdoor)
		eb |= (1u << GP_VECTOR);
	if ((vcpu->guest_debug &
	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
		eb |= 1u << BP_VECTOR;
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (!vmx_need_pf_intercept(vcpu))
		eb &= ~(1u << PF_VECTOR);

	/* When we are running a nested L2 guest and L1 specified for it a
	 * certain exception bitmap, we must trap the same exceptions and pass
	 * them to L1. When running L2, we will only handle the exceptions
	 * specified above if L1 did not want them.
	 */
	if (is_guest_mode(vcpu))
		eb |= get_vmcs12(vcpu)->exception_bitmap;
	else {
		int mask = 0, match = 0;

		if (enable_ept && (eb & (1u << PF_VECTOR))) {
			/*
			 * If EPT is enabled, #PF is currently only intercepted
			 * if MAXPHYADDR is smaller on the guest than on the
			 * host. In that case we only care about present,
			 * non-reserved faults. For vmcs02, however, PFEC_MASK
			 * and PFEC_MATCH are set in prepare_vmcs02_rare.
			 */
			mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
			match = PFERR_PRESENT_MASK;
		}
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
	}

	/*
	 * Disabling xfd interception indicates that dynamic xfeatures
	 * might be used in the guest. Always trap #NM in this case
	 * to save guest xfd_err timely.
	 */
	if (vcpu->arch.xfd_no_write_intercept)
		eb |= (1u << NM_VECTOR);

	vmcs_write32(EXCEPTION_BITMAP, eb);
}

/*
 * Check if MSR is intercepted for currently loaded MSR bitmap.
 */
static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
{
	if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
		return true;

	return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap,
					 MSR_IA32_SPEC_CTRL);
}

unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
{
	unsigned int flags = 0;

	if (vmx->loaded_vmcs->launched)
		flags |= VMX_RUN_VMRESUME;

	/*
	 * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
	 * to change it directly without causing a vmexit. In that case read
	 * it after vmexit and store it in vmx->spec_ctrl.
	 */
	if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)))
		flags |= VMX_RUN_SAVE_SPEC_CTRL;

	return flags;
}

static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit)
{
	vm_entry_controls_clearbit(vmx, entry);
	vm_exit_controls_clearbit(vmx, exit);
}

int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
{
	unsigned int i;

	for (i = 0; i < m->nr; ++i) {
		if (m->val[i].index == msr)
			return i;
	}
	return -ENOENT;
}

static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
	int i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer()) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl()) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
			return;
		}
		break;
	}
	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
	if (i < 0)
		goto skip_guest;
	--m->guest.nr;
	m->guest.val[i] = m->guest.val[m->guest.nr];
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);

skip_guest:
	i = vmx_find_loadstore_msr_slot(&m->host, msr);
	if (i < 0)
		return;

	--m->host.nr;
	m->host.val[i] = m->host.val[m->host.nr];
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
}

static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit,
		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
		u64 guest_val, u64 host_val)
{
	vmcs_write64(guest_val_vmcs, guest_val);
	if (host_val_vmcs != HOST_IA32_EFER)
		vmcs_write64(host_val_vmcs, host_val);
	vm_entry_controls_setbit(vmx, entry);
	vm_exit_controls_setbit(vmx, exit);
}

static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val, bool entry_only)
{
	int i, j = 0;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer()) {
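			/*
			 * EFER has dedicated "load IA32_EFER" VM-Entry/VM-Exit
			 * controls; use them instead of consuming a slot in
			 * the atomic switch (autoload) MSR lists.
			 */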
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER,
					GUEST_IA32_EFER,
					HOST_IA32_EFER,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl()) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
					GUEST_IA32_PERF_GLOBAL_CTRL,
					HOST_IA32_PERF_GLOBAL_CTRL,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_IA32_PEBS_ENABLE:
		/* PEBS needs a quiescent period after being disabled (to write
		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
		 * provide that period, so a CPU could write host's record into
		 * guest's memory.
		 */
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
	}

	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
	if (!entry_only)
		j = vmx_find_loadstore_msr_slot(&m->host, msr);

	if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
	    (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	}
	if (i < 0) {
		i = m->guest.nr++;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
	}
	m->guest.val[i].index = msr;
	m->guest.val[i].value = guest_val;

	if (entry_only)
		return;

	if (j < 0) {
		j = m->host.nr++;
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
	}
	m->host.val[j].index = msr;
	m->host.val[j].value = host_val;
}

static bool update_transition_efer(struct vcpu_vmx *vmx)
{
	u64 guest_efer = vmx->vcpu.arch.efer;
	u64 ignore_bits = 0;
	int i;

	/* Shadow paging assumes NX to be available. */
	if (!enable_ept)
		guest_efer |= EFER_NX;

	/*
	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
	 */
	ignore_bits |= EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif

	/*
	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
	 * On CPUs that support "load IA32_EFER", always switch EFER
	 * atomically, since it's faster than switching it manually.
	 */
	if (cpu_has_load_ia32_efer() ||
	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		if (guest_efer != host_efer)
			add_atomic_switch_msr(vmx, MSR_EFER,
					      guest_efer, host_efer, false);
		else
			clear_atomic_switch_msr(vmx, MSR_EFER);
		return false;
	}

	i = kvm_find_user_return_msr(MSR_EFER);
	if (i < 0)
		return false;

	clear_atomic_switch_msr(vmx, MSR_EFER);

	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;

	vmx->guest_uret_msrs[i].data = guest_efer;
	vmx->guest_uret_msrs[i].mask = ~ignore_bits;

	return true;
}

#ifdef CONFIG_X86_32
/*
 * On 32-bit kernels, VM exits still load the FS and GS bases from the
 * VMCS rather than the segment table. KVM uses this helper to figure
 * out the current bases to poke them into the VMCS before entry.
 */
static unsigned long segment_base(u16 selector)
{
	struct desc_struct *table;
	unsigned long v;

	if (!(selector & ~SEGMENT_RPL_MASK))
		return 0;

	table = get_current_gdt_ro();

	if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~SEGMENT_RPL_MASK))
			return 0;

		table = (struct desc_struct *)segment_base(ldt_selector);
	}
	v = get_desc_base(&table[selector >> 3]);
	return v;
}
#endif

static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
{
	return vmx_pt_mode_is_host_guest() &&
	       !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
}

static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
{
	/* The base must be 128-byte aligned and a legal physical address. */
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128);
}

static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
{
	u32 i;

	wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
	wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
	for (i = 0; i < addr_range; i++) {
		wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
		wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
	}
}

static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
{
	u32 i;

	rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
	rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
	for (i = 0; i < addr_range; i++) {
		rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
		rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
	}
}

static void pt_guest_enter(struct vcpu_vmx *vmx)
{
	if (vmx_pt_mode_is_system())
		return;

	/*
	 * GUEST_IA32_RTIT_CTL is already set in the VMCS.
	 * Save host state before VM entry.
	 */
	rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
		wrmsrl(MSR_IA32_RTIT_CTL, 0);
		pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
		pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
	}
}

static void pt_guest_exit(struct vcpu_vmx *vmx)
{
	if (vmx_pt_mode_is_system())
		return;

	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
		pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
		pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
	}

	/*
	 * KVM requires VM_EXIT_CLEAR_IA32_RTIT_CTL to expose PT to the guest,
	 * i.e. RTIT_CTL is always cleared on VM-Exit.  Restore it if necessary.
	 */
	if (vmx->pt_desc.host.ctl)
		wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
}

void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base)
{
	if (unlikely(fs_sel != host->fs_sel)) {
		if (!(fs_sel & 7))
			vmcs_write16(HOST_FS_SELECTOR, fs_sel);
		else
			vmcs_write16(HOST_FS_SELECTOR, 0);
		host->fs_sel = fs_sel;
	}
	if (unlikely(gs_sel != host->gs_sel)) {
		if (!(gs_sel & 7))
			vmcs_write16(HOST_GS_SELECTOR, gs_sel);
		else
			vmcs_write16(HOST_GS_SELECTOR, 0);
		host->gs_sel = gs_sel;
	}
	if (unlikely(fs_base != host->fs_base)) {
		vmcs_writel(HOST_FS_BASE, fs_base);
		host->fs_base = fs_base;
	}
	if (unlikely(gs_base != host->gs_base)) {
		vmcs_writel(HOST_GS_BASE, gs_base);
		host->gs_base = gs_base;
	}
}

void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs_host_state *host_state;
#ifdef CONFIG_X86_64
	int cpu = raw_smp_processor_id();
#endif
	unsigned long fs_base, gs_base;
	u16 fs_sel, gs_sel;
	int i;

	vmx->req_immediate_exit = false;

	/*
	 * Note that guest MSRs to be saved/restored can also be changed
	 * when guest state is loaded. This happens when guest transitions
	 * to/from long-mode by setting MSR_EFER.LMA.
	 */
	if (!vmx->guest_uret_msrs_loaded) {
		vmx->guest_uret_msrs_loaded = true;
		for (i = 0; i < kvm_nr_uret_msrs; ++i) {
			if (!vmx->guest_uret_msrs[i].load_into_hardware)
				continue;

			kvm_set_user_return_msr(i,
						vmx->guest_uret_msrs[i].data,
						vmx->guest_uret_msrs[i].mask);
		}
	}

	if (vmx->nested.need_vmcs12_to_shadow_sync)
		nested_sync_vmcs12_to_shadow(vcpu);

	if (vmx->guest_state_loaded)
		return;

	host_state = &vmx->loaded_vmcs->host_state;

	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	host_state->ldt_sel = kvm_read_ldt();

#ifdef CONFIG_X86_64
	savesegment(ds, host_state->ds_sel);
	savesegment(es, host_state->es_sel);

	gs_base = cpu_kernelmode_gs_base(cpu);
	if (likely(is_64bit_mm(current->mm))) {
		current_save_fsgs();
		fs_sel = current->thread.fsindex;
		gs_sel = current->thread.gsindex;
		fs_base = current->thread.fsbase;
		vmx->msr_host_kernel_gs_base = current->thread.gsbase;
	} else {
		savesegment(fs, fs_sel);
		savesegment(gs, gs_sel);
		fs_base = read_msr(MSR_FS_BASE);
		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
	}

	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#else
	savesegment(fs, fs_sel);
	savesegment(gs, gs_sel);
	fs_base = segment_base(fs_sel);
	gs_base = segment_base(gs_sel);
#endif

	vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
	vmx->guest_state_loaded = true;
}

static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
{
	struct vmcs_host_state *host_state;

	if (!vmx->guest_state_loaded)
		return;

	host_state = &vmx->loaded_vmcs->host_state;

	++vmx->vcpu.stat.host_state_reload;

#ifdef CONFIG_X86_64
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
		kvm_load_ldt(host_state->ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(host_state->gs_sel);
#else
		loadsegment(gs, host_state->gs_sel);
#endif
	}
	if (host_state->fs_sel & 7)
		loadsegment(fs, host_state->fs_sel);
#ifdef CONFIG_X86_64
	if (unlikely(host_state->ds_sel | host_state->es_sel)) {
		loadsegment(ds, host_state->ds_sel);
		loadsegment(es, host_state->es_sel);
	}
#endif
	invalidate_tss_limit();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
	load_fixmap_gdt(raw_smp_processor_id());
	vmx->guest_state_loaded = false;
	vmx->guest_uret_msrs_loaded = false;
}

#ifdef CONFIG_X86_64
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
{
	preempt_disable();
	if (vmx->guest_state_loaded)
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
	preempt_enable();
	return vmx->msr_guest_kernel_gs_base;
}

static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
{
	preempt_disable();
	if (vmx->guest_state_loaded)
		wrmsrl(MSR_KERNEL_GS_BASE, data);
	preempt_enable();
	vmx->msr_guest_kernel_gs_base = data;
}
#endif

void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
	struct vmcs *prev;

	if (!already_loaded) {
		loaded_vmcs_clear(vmx->loaded_vmcs);
		local_irq_disable();

		/*
		 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
		 * this cpu's percpu list, otherwise it may not yet be deleted
		 * from its previous cpu's percpu list.  Pairs with the
		 * smp_wmb() in __loaded_vmcs_clear().
		 */
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		local_irq_enable();
	}

	prev = per_cpu(current_vmcs, cpu);
	if (prev != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);

		/*
		 * No indirect branch prediction barrier needed when switching
		 * the active VMCS within a guest, e.g. on nested VM-Enter.
		 * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
		 */
		if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
			indirect_branch_prediction_barrier();
	}

	if (!already_loaded) {
		void *gdt = get_current_gdt_ro();

		/*
		 * Flush all EPTP/VPID contexts, the new pCPU may have stale
		 * TLB entries from its previous association with the vCPU.
		 */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.  See 22.2.4.
		 */
		vmcs_writel(HOST_TR_BASE,
			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */

		if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
			/* 22.2.3 */
			vmcs_writel(HOST_IA32_SYSENTER_ESP,
				    (unsigned long)(cpu_entry_stack(cpu) + 1));
		}

		vmx->loaded_vmcs->cpu = cpu;
	}
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);

	vmx_vcpu_pi_load(vcpu, cpu);

	vmx->host_debugctlmsr = get_debugctlmsr();
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_vcpu_pi_put(vcpu);

	vmx_prepare_switch_to_host(to_vmx(vcpu));
}

bool vmx_emulation_required(struct kvm_vcpu *vcpu)
{
	return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
}

unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long rflags, save_rflags;

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
		rflags = vmcs_readl(GUEST_RFLAGS);
		if (vmx->rmode.vm86_active) {
			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
			save_rflags = vmx->rmode.save_rflags;
			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
		}
		vmx->rflags = rflags;
	}
	return vmx->rflags;
}

void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long old_rflags;

	if (is_unrestricted_guest(vcpu)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
		vmx->rflags = rflags;
		vmcs_writel(GUEST_RFLAGS, rflags);
		return;
	}

	old_rflags = vmx_get_rflags(vcpu);
	vmx->rflags = rflags;
	if (vmx->rmode.vm86_active) {
		vmx->rmode.save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	vmcs_writel(GUEST_RFLAGS, rflags);

	if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
		vmx->emulation_required = vmx_emulation_required(vcpu);
}

static bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
{
	return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
X86_EFLAGS_IF; 1450 } 1451 1452 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) 1453 { 1454 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 1455 int ret = 0; 1456 1457 if (interruptibility & GUEST_INTR_STATE_STI) 1458 ret |= KVM_X86_SHADOW_INT_STI; 1459 if (interruptibility & GUEST_INTR_STATE_MOV_SS) 1460 ret |= KVM_X86_SHADOW_INT_MOV_SS; 1461 1462 return ret; 1463 } 1464 1465 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) 1466 { 1467 u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 1468 u32 interruptibility = interruptibility_old; 1469 1470 interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); 1471 1472 if (mask & KVM_X86_SHADOW_INT_MOV_SS) 1473 interruptibility |= GUEST_INTR_STATE_MOV_SS; 1474 else if (mask & KVM_X86_SHADOW_INT_STI) 1475 interruptibility |= GUEST_INTR_STATE_STI; 1476 1477 if ((interruptibility != interruptibility_old)) 1478 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); 1479 } 1480 1481 static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data) 1482 { 1483 struct vcpu_vmx *vmx = to_vmx(vcpu); 1484 unsigned long value; 1485 1486 /* 1487 * Any MSR write that attempts to change bits marked reserved will 1488 * case a #GP fault. 1489 */ 1490 if (data & vmx->pt_desc.ctl_bitmask) 1491 return 1; 1492 1493 /* 1494 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will 1495 * result in a #GP unless the same write also clears TraceEn. 1496 */ 1497 if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) && 1498 ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN)) 1499 return 1; 1500 1501 /* 1502 * WRMSR to IA32_RTIT_CTL that sets TraceEn but clears this bit 1503 * and FabricEn would cause #GP, if 1504 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0 1505 */ 1506 if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) && 1507 !(data & RTIT_CTL_FABRIC_EN) && 1508 !intel_pt_validate_cap(vmx->pt_desc.caps, 1509 PT_CAP_single_range_output)) 1510 return 1; 1511 1512 /* 1513 * MTCFreq, CycThresh and PSBFreq encodings check, any MSR write that 1514 * utilize encodings marked reserved will cause a #GP fault. 1515 */ 1516 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods); 1517 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) && 1518 !test_bit((data & RTIT_CTL_MTC_RANGE) >> 1519 RTIT_CTL_MTC_RANGE_OFFSET, &value)) 1520 return 1; 1521 value = intel_pt_validate_cap(vmx->pt_desc.caps, 1522 PT_CAP_cycle_thresholds); 1523 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && 1524 !test_bit((data & RTIT_CTL_CYC_THRESH) >> 1525 RTIT_CTL_CYC_THRESH_OFFSET, &value)) 1526 return 1; 1527 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods); 1528 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && 1529 !test_bit((data & RTIT_CTL_PSB_FREQ) >> 1530 RTIT_CTL_PSB_FREQ_OFFSET, &value)) 1531 return 1; 1532 1533 /* 1534 * If ADDRx_CFG is reserved or the encodings is >2 will 1535 * cause a #GP fault. 
1536 */ 1537 value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET; 1538 if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2)) 1539 return 1; 1540 value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET; 1541 if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2)) 1542 return 1; 1543 value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET; 1544 if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2)) 1545 return 1; 1546 value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET; 1547 if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2)) 1548 return 1; 1549 1550 return 0; 1551 } 1552 1553 static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type, 1554 void *insn, int insn_len) 1555 { 1556 /* 1557 * Emulation of instructions in SGX enclaves is impossible as RIP does 1558 * not point at the failing instruction, and even if it did, the code 1559 * stream is inaccessible. Inject #UD instead of exiting to userspace 1560 * so that guest userspace can't DoS the guest simply by triggering 1561 * emulation (enclaves are CPL3 only). 1562 */ 1563 if (to_vmx(vcpu)->exit_reason.enclave_mode) { 1564 kvm_queue_exception(vcpu, UD_VECTOR); 1565 return false; 1566 } 1567 return true; 1568 } 1569 1570 static int skip_emulated_instruction(struct kvm_vcpu *vcpu) 1571 { 1572 union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason; 1573 unsigned long rip, orig_rip; 1574 u32 instr_len; 1575 1576 /* 1577 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on 1578 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be 1579 * set when EPT misconfig occurs. In practice, real hardware updates 1580 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors 1581 * (namely Hyper-V) don't set it due to it being undefined behavior, 1582 * i.e. we end up advancing IP with some random value. 1583 */ 1584 if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || 1585 exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) { 1586 instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 1587 1588 /* 1589 * Emulating an enclave's instructions isn't supported as KVM 1590 * cannot access the enclave's memory or its true RIP, e.g. the 1591 * vmcs.GUEST_RIP points at the exit point of the enclave, not 1592 * the RIP that actually triggered the VM-Exit. But, because 1593 * most instructions that cause VM-Exit will #UD in an enclave, 1594 * most instruction-based VM-Exits simply do not occur. 1595 * 1596 * There are a few exceptions, notably the debug instructions 1597 * INT1ICEBRK and INT3, as they are allowed in debug enclaves 1598 * and generate #DB/#BP as expected, which KVM might intercept. 1599 * But again, the CPU does the dirty work and saves an instr 1600 * length of zero so VMMs don't shoot themselves in the foot. 1601 * WARN if KVM tries to skip a non-zero length instruction on 1602 * a VM-Exit from an enclave. 1603 */ 1604 if (!instr_len) 1605 goto rip_updated; 1606 1607 WARN(exit_reason.enclave_mode, 1608 "KVM: skipping instruction after SGX enclave VM-Exit"); 1609 1610 orig_rip = kvm_rip_read(vcpu); 1611 rip = orig_rip + instr_len; 1612 #ifdef CONFIG_X86_64 1613 /* 1614 * We need to mask out the high 32 bits of RIP if not in 64-bit 1615 * mode, but just finding out that we are in 64-bit mode is 1616 * quite expensive. Only do it if there was a carry. 
1617 */ 1618 if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu)) 1619 rip = (u32)rip; 1620 #endif 1621 kvm_rip_write(vcpu, rip); 1622 } else { 1623 if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP)) 1624 return 0; 1625 } 1626 1627 rip_updated: 1628 /* skipping an emulated instruction also counts */ 1629 vmx_set_interrupt_shadow(vcpu, 0); 1630 1631 return 1; 1632 } 1633 1634 /* 1635 * Recognizes a pending MTF VM-exit and records the nested state for later 1636 * delivery. 1637 */ 1638 static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu) 1639 { 1640 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1641 struct vcpu_vmx *vmx = to_vmx(vcpu); 1642 1643 if (!is_guest_mode(vcpu)) 1644 return; 1645 1646 /* 1647 * Per the SDM, MTF takes priority over debug-trap exceptions besides 1648 * T-bit traps. As instruction emulation is completed (i.e. at the 1649 * instruction boundary), any #DB exception pending delivery must be a 1650 * debug-trap. Record the pending MTF state to be delivered in 1651 * vmx_check_nested_events(). 1652 */ 1653 if (nested_cpu_has_mtf(vmcs12) && 1654 (!vcpu->arch.exception.pending || 1655 vcpu->arch.exception.nr == DB_VECTOR)) 1656 vmx->nested.mtf_pending = true; 1657 else 1658 vmx->nested.mtf_pending = false; 1659 } 1660 1661 static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu) 1662 { 1663 vmx_update_emulated_instruction(vcpu); 1664 return skip_emulated_instruction(vcpu); 1665 } 1666 1667 static void vmx_clear_hlt(struct kvm_vcpu *vcpu) 1668 { 1669 /* 1670 * Ensure that we clear the HLT state in the VMCS. We don't need to 1671 * explicitly skip the instruction because if the HLT state is set, 1672 * then the instruction is already executing and RIP has already been 1673 * advanced. 1674 */ 1675 if (kvm_hlt_in_guest(vcpu->kvm) && 1676 vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT) 1677 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); 1678 } 1679 1680 static void vmx_queue_exception(struct kvm_vcpu *vcpu) 1681 { 1682 struct vcpu_vmx *vmx = to_vmx(vcpu); 1683 unsigned nr = vcpu->arch.exception.nr; 1684 bool has_error_code = vcpu->arch.exception.has_error_code; 1685 u32 error_code = vcpu->arch.exception.error_code; 1686 u32 intr_info = nr | INTR_INFO_VALID_MASK; 1687 1688 kvm_deliver_exception_payload(vcpu); 1689 1690 if (has_error_code) { 1691 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 1692 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 1693 } 1694 1695 if (vmx->rmode.vm86_active) { 1696 int inc_eip = 0; 1697 if (kvm_exception_is_soft(nr)) 1698 inc_eip = vcpu->arch.event_exit_inst_len; 1699 kvm_inject_realmode_interrupt(vcpu, nr, inc_eip); 1700 return; 1701 } 1702 1703 WARN_ON_ONCE(vmx->emulation_required); 1704 1705 if (kvm_exception_is_soft(nr)) { 1706 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1707 vmx->vcpu.arch.event_exit_inst_len); 1708 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 1709 } else 1710 intr_info |= INTR_TYPE_HARD_EXCEPTION; 1711 1712 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); 1713 1714 vmx_clear_hlt(vcpu); 1715 } 1716 1717 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr, 1718 bool load_into_hardware) 1719 { 1720 struct vmx_uret_msr *uret_msr; 1721 1722 uret_msr = vmx_find_uret_msr(vmx, msr); 1723 if (!uret_msr) 1724 return; 1725 1726 uret_msr->load_into_hardware = load_into_hardware; 1727 } 1728 1729 /* 1730 * Configuring user return MSRs to automatically save, load, and restore MSRs 1731 * that need to be shoved into hardware when running the guest. 
Note, omitting 1732 * an MSR here does _NOT_ mean it's not emulated, only that it will not be 1733 * loaded into hardware when running the guest. 1734 */ 1735 static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx) 1736 { 1737 #ifdef CONFIG_X86_64 1738 bool load_syscall_msrs; 1739 1740 /* 1741 * The SYSCALL MSRs are only needed on long mode guests, and only 1742 * when EFER.SCE is set. 1743 */ 1744 load_syscall_msrs = is_long_mode(&vmx->vcpu) && 1745 (vmx->vcpu.arch.efer & EFER_SCE); 1746 1747 vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs); 1748 vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs); 1749 vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs); 1750 #endif 1751 vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx)); 1752 1753 vmx_setup_uret_msr(vmx, MSR_TSC_AUX, 1754 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) || 1755 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID)); 1756 1757 /* 1758 * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new 1759 * kernel and old userspace. If those guests run on a tsx=off host, do 1760 * allow guests to use TSX_CTRL, but don't change the value in hardware 1761 * so that TSX remains always disabled. 1762 */ 1763 vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM)); 1764 1765 /* 1766 * The set of MSRs to load may have changed, reload MSRs before the 1767 * next VM-Enter. 1768 */ 1769 vmx->guest_uret_msrs_loaded = false; 1770 } 1771 1772 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu) 1773 { 1774 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1775 1776 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) 1777 return vmcs12->tsc_offset; 1778 1779 return 0; 1780 } 1781 1782 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu) 1783 { 1784 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1785 1786 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) && 1787 nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING)) 1788 return vmcs12->tsc_multiplier; 1789 1790 return kvm_default_tsc_scaling_ratio; 1791 } 1792 1793 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 1794 { 1795 vmcs_write64(TSC_OFFSET, offset); 1796 } 1797 1798 static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) 1799 { 1800 vmcs_write64(TSC_MULTIPLIER, multiplier); 1801 } 1802 1803 /* 1804 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX 1805 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for 1806 * all guests if the "nested" module option is off, and can also be disabled 1807 * for a single guest by disabling its VMX cpuid bit. 1808 */ 1809 bool nested_vmx_allowed(struct kvm_vcpu *vcpu) 1810 { 1811 return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); 1812 } 1813 1814 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, 1815 uint64_t val) 1816 { 1817 uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; 1818 1819 return !(val & ~valid_bits); 1820 } 1821 1822 static int vmx_get_msr_feature(struct kvm_msr_entry *msr) 1823 { 1824 switch (msr->index) { 1825 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 1826 if (!nested) 1827 return 1; 1828 return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); 1829 case MSR_IA32_PERF_CAPABILITIES: 1830 msr->data = vmx_get_perf_capabilities(); 1831 return 0; 1832 default: 1833 return KVM_MSR_RET_INVALID; 1834 } 1835 } 1836 1837 /* 1838 * Reads an msr value (of 'msr_info->index') into 'msr_info->data'. 1839 * Returns 0 on success, non-0 otherwise. 
1840 * Assumes vcpu_load() was already called. 1841 */ 1842 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1843 { 1844 struct vcpu_vmx *vmx = to_vmx(vcpu); 1845 struct vmx_uret_msr *msr; 1846 u32 index; 1847 1848 switch (msr_info->index) { 1849 #ifdef CONFIG_X86_64 1850 case MSR_FS_BASE: 1851 msr_info->data = vmcs_readl(GUEST_FS_BASE); 1852 break; 1853 case MSR_GS_BASE: 1854 msr_info->data = vmcs_readl(GUEST_GS_BASE); 1855 break; 1856 case MSR_KERNEL_GS_BASE: 1857 msr_info->data = vmx_read_guest_kernel_gs_base(vmx); 1858 break; 1859 #endif 1860 case MSR_EFER: 1861 return kvm_get_msr_common(vcpu, msr_info); 1862 case MSR_IA32_TSX_CTRL: 1863 if (!msr_info->host_initiated && 1864 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) 1865 return 1; 1866 goto find_uret_msr; 1867 case MSR_IA32_UMWAIT_CONTROL: 1868 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) 1869 return 1; 1870 1871 msr_info->data = vmx->msr_ia32_umwait_control; 1872 break; 1873 case MSR_IA32_SPEC_CTRL: 1874 if (!msr_info->host_initiated && 1875 !guest_has_spec_ctrl_msr(vcpu)) 1876 return 1; 1877 1878 msr_info->data = to_vmx(vcpu)->spec_ctrl; 1879 break; 1880 case MSR_IA32_SYSENTER_CS: 1881 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); 1882 break; 1883 case MSR_IA32_SYSENTER_EIP: 1884 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP); 1885 break; 1886 case MSR_IA32_SYSENTER_ESP: 1887 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); 1888 break; 1889 case MSR_IA32_BNDCFGS: 1890 if (!kvm_mpx_supported() || 1891 (!msr_info->host_initiated && 1892 !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) 1893 return 1; 1894 msr_info->data = vmcs_read64(GUEST_BNDCFGS); 1895 break; 1896 case MSR_IA32_MCG_EXT_CTL: 1897 if (!msr_info->host_initiated && 1898 !(vmx->msr_ia32_feature_control & 1899 FEAT_CTL_LMCE_ENABLED)) 1900 return 1; 1901 msr_info->data = vcpu->arch.mcg_ext_ctl; 1902 break; 1903 case MSR_IA32_FEAT_CTL: 1904 msr_info->data = vmx->msr_ia32_feature_control; 1905 break; 1906 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: 1907 if (!msr_info->host_initiated && 1908 !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC)) 1909 return 1; 1910 msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash 1911 [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0]; 1912 break; 1913 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 1914 if (!nested_vmx_allowed(vcpu)) 1915 return 1; 1916 if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, 1917 &msr_info->data)) 1918 return 1; 1919 /* 1920 * Enlightened VMCS v1 doesn't have certain VMCS fields but 1921 * instead of just ignoring the features, different Hyper-V 1922 * versions are either trying to use them and fail or do some 1923 * sanity checking and refuse to boot. Filter all unsupported 1924 * features out. 
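 * nested_evmcs_filter_control_msr() below strips the control bits that
 * eVMCS v1 cannot express before the value is handed back to the guest.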
1925 */ 1926 if (!msr_info->host_initiated && 1927 vmx->nested.enlightened_vmcs_enabled) 1928 nested_evmcs_filter_control_msr(msr_info->index, 1929 &msr_info->data); 1930 break; 1931 case MSR_IA32_RTIT_CTL: 1932 if (!vmx_pt_mode_is_host_guest()) 1933 return 1; 1934 msr_info->data = vmx->pt_desc.guest.ctl; 1935 break; 1936 case MSR_IA32_RTIT_STATUS: 1937 if (!vmx_pt_mode_is_host_guest()) 1938 return 1; 1939 msr_info->data = vmx->pt_desc.guest.status; 1940 break; 1941 case MSR_IA32_RTIT_CR3_MATCH: 1942 if (!vmx_pt_mode_is_host_guest() || 1943 !intel_pt_validate_cap(vmx->pt_desc.caps, 1944 PT_CAP_cr3_filtering)) 1945 return 1; 1946 msr_info->data = vmx->pt_desc.guest.cr3_match; 1947 break; 1948 case MSR_IA32_RTIT_OUTPUT_BASE: 1949 if (!vmx_pt_mode_is_host_guest() || 1950 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1951 PT_CAP_topa_output) && 1952 !intel_pt_validate_cap(vmx->pt_desc.caps, 1953 PT_CAP_single_range_output))) 1954 return 1; 1955 msr_info->data = vmx->pt_desc.guest.output_base; 1956 break; 1957 case MSR_IA32_RTIT_OUTPUT_MASK: 1958 if (!vmx_pt_mode_is_host_guest() || 1959 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1960 PT_CAP_topa_output) && 1961 !intel_pt_validate_cap(vmx->pt_desc.caps, 1962 PT_CAP_single_range_output))) 1963 return 1; 1964 msr_info->data = vmx->pt_desc.guest.output_mask; 1965 break; 1966 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 1967 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; 1968 if (!vmx_pt_mode_is_host_guest() || 1969 (index >= 2 * vmx->pt_desc.num_address_ranges)) 1970 return 1; 1971 if (index % 2) 1972 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; 1973 else 1974 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2]; 1975 break; 1976 case MSR_IA32_DEBUGCTLMSR: 1977 msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL); 1978 break; 1979 default: 1980 find_uret_msr: 1981 msr = vmx_find_uret_msr(vmx, msr_info->index); 1982 if (msr) { 1983 msr_info->data = msr->data; 1984 break; 1985 } 1986 return kvm_get_msr_common(vcpu, msr_info); 1987 } 1988 1989 return 0; 1990 } 1991 1992 static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu, 1993 u64 data) 1994 { 1995 #ifdef CONFIG_X86_64 1996 if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) 1997 return (u32)data; 1998 #endif 1999 return (unsigned long)data; 2000 } 2001 2002 static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu) 2003 { 2004 u64 debugctl = vmx_supported_debugctl(); 2005 2006 if (!intel_pmu_lbr_is_enabled(vcpu)) 2007 debugctl &= ~DEBUGCTLMSR_LBR_MASK; 2008 2009 if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)) 2010 debugctl &= ~DEBUGCTLMSR_BUS_LOCK_DETECT; 2011 2012 return debugctl; 2013 } 2014 2015 /* 2016 * Writes msr value into the appropriate "register". 2017 * Returns 0 on success, non-0 otherwise. 2018 * Assumes vcpu_load() was already called. 
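 * Host-initiated writes (msr_info->host_initiated) may bypass the guest
 * CPUID/feature checks that guard several of the cases below.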
2019 */ 2020 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 2021 { 2022 struct vcpu_vmx *vmx = to_vmx(vcpu); 2023 struct vmx_uret_msr *msr; 2024 int ret = 0; 2025 u32 msr_index = msr_info->index; 2026 u64 data = msr_info->data; 2027 u32 index; 2028 2029 switch (msr_index) { 2030 case MSR_EFER: 2031 ret = kvm_set_msr_common(vcpu, msr_info); 2032 break; 2033 #ifdef CONFIG_X86_64 2034 case MSR_FS_BASE: 2035 vmx_segment_cache_clear(vmx); 2036 vmcs_writel(GUEST_FS_BASE, data); 2037 break; 2038 case MSR_GS_BASE: 2039 vmx_segment_cache_clear(vmx); 2040 vmcs_writel(GUEST_GS_BASE, data); 2041 break; 2042 case MSR_KERNEL_GS_BASE: 2043 vmx_write_guest_kernel_gs_base(vmx, data); 2044 break; 2045 case MSR_IA32_XFD: 2046 ret = kvm_set_msr_common(vcpu, msr_info); 2047 /* 2048 * Always intercepting WRMSR could incur non-negligible 2049 * overhead given xfd might be changed frequently in 2050 * guest context switch. Disable write interception 2051 * upon the first write with a non-zero value (indicating 2052 * potential usage on dynamic xfeatures). Also update 2053 * exception bitmap to trap #NM for proper virtualization 2054 * of guest xfd_err. 2055 */ 2056 if (!ret && data) { 2057 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD, 2058 MSR_TYPE_RW); 2059 vcpu->arch.xfd_no_write_intercept = true; 2060 vmx_update_exception_bitmap(vcpu); 2061 } 2062 break; 2063 #endif 2064 case MSR_IA32_SYSENTER_CS: 2065 if (is_guest_mode(vcpu)) 2066 get_vmcs12(vcpu)->guest_sysenter_cs = data; 2067 vmcs_write32(GUEST_SYSENTER_CS, data); 2068 break; 2069 case MSR_IA32_SYSENTER_EIP: 2070 if (is_guest_mode(vcpu)) { 2071 data = nested_vmx_truncate_sysenter_addr(vcpu, data); 2072 get_vmcs12(vcpu)->guest_sysenter_eip = data; 2073 } 2074 vmcs_writel(GUEST_SYSENTER_EIP, data); 2075 break; 2076 case MSR_IA32_SYSENTER_ESP: 2077 if (is_guest_mode(vcpu)) { 2078 data = nested_vmx_truncate_sysenter_addr(vcpu, data); 2079 get_vmcs12(vcpu)->guest_sysenter_esp = data; 2080 } 2081 vmcs_writel(GUEST_SYSENTER_ESP, data); 2082 break; 2083 case MSR_IA32_DEBUGCTLMSR: { 2084 u64 invalid = data & ~vcpu_supported_debugctl(vcpu); 2085 if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) { 2086 if (report_ignored_msrs) 2087 vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n", 2088 __func__, data); 2089 data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); 2090 invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); 2091 } 2092 2093 if (invalid) 2094 return 1; 2095 2096 if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls & 2097 VM_EXIT_SAVE_DEBUG_CONTROLS) 2098 get_vmcs12(vcpu)->guest_ia32_debugctl = data; 2099 2100 vmcs_write64(GUEST_IA32_DEBUGCTL, data); 2101 if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event && 2102 (data & DEBUGCTLMSR_LBR)) 2103 intel_pmu_create_guest_lbr_event(vcpu); 2104 return 0; 2105 } 2106 case MSR_IA32_BNDCFGS: 2107 if (!kvm_mpx_supported() || 2108 (!msr_info->host_initiated && 2109 !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) 2110 return 1; 2111 if (is_noncanonical_address(data & PAGE_MASK, vcpu) || 2112 (data & MSR_IA32_BNDCFGS_RSVD)) 2113 return 1; 2114 vmcs_write64(GUEST_BNDCFGS, data); 2115 break; 2116 case MSR_IA32_UMWAIT_CONTROL: 2117 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) 2118 return 1; 2119 2120 /* The reserved bit 1 and non-32 bit [63:32] should be zero */ 2121 if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) 2122 return 1; 2123 2124 vmx->msr_ia32_umwait_control = data; 2125 break; 2126 case MSR_IA32_SPEC_CTRL: 2127 if (!msr_info->host_initiated && 2128 
!guest_has_spec_ctrl_msr(vcpu)) 2129 return 1; 2130 2131 if (kvm_spec_ctrl_test_value(data)) 2132 return 1; 2133 2134 vmx->spec_ctrl = data; 2135 if (!data) 2136 break; 2137 2138 /* 2139 * For non-nested: 2140 * When it's written (to non-zero) for the first time, pass 2141 * it through. 2142 * 2143 * For nested: 2144 * The handling of the MSR bitmap for L2 guests is done in 2145 * nested_vmx_prepare_msr_bitmap. We should not touch the 2146 * vmcs02.msr_bitmap here since it gets completely overwritten 2147 * in the merging. We update the vmcs01 here for L1 as well 2148 * since it will end up touching the MSR anyway now. 2149 */ 2150 vmx_disable_intercept_for_msr(vcpu, 2151 MSR_IA32_SPEC_CTRL, 2152 MSR_TYPE_RW); 2153 break; 2154 case MSR_IA32_TSX_CTRL: 2155 if (!msr_info->host_initiated && 2156 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) 2157 return 1; 2158 if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR)) 2159 return 1; 2160 goto find_uret_msr; 2161 case MSR_IA32_PRED_CMD: 2162 if (!msr_info->host_initiated && 2163 !guest_has_pred_cmd_msr(vcpu)) 2164 return 1; 2165 2166 if (data & ~PRED_CMD_IBPB) 2167 return 1; 2168 if (!boot_cpu_has(X86_FEATURE_IBPB)) 2169 return 1; 2170 if (!data) 2171 break; 2172 2173 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); 2174 2175 /* 2176 * For non-nested: 2177 * When it's written (to non-zero) for the first time, pass 2178 * it through. 2179 * 2180 * For nested: 2181 * The handling of the MSR bitmap for L2 guests is done in 2182 * nested_vmx_prepare_msr_bitmap. We should not touch the 2183 * vmcs02.msr_bitmap here since it gets completely overwritten 2184 * in the merging. 2185 */ 2186 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W); 2187 break; 2188 case MSR_IA32_CR_PAT: 2189 if (!kvm_pat_valid(data)) 2190 return 1; 2191 2192 if (is_guest_mode(vcpu) && 2193 get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) 2194 get_vmcs12(vcpu)->guest_ia32_pat = data; 2195 2196 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 2197 vmcs_write64(GUEST_IA32_PAT, data); 2198 vcpu->arch.pat = data; 2199 break; 2200 } 2201 ret = kvm_set_msr_common(vcpu, msr_info); 2202 break; 2203 case MSR_IA32_MCG_EXT_CTL: 2204 if ((!msr_info->host_initiated && 2205 !(to_vmx(vcpu)->msr_ia32_feature_control & 2206 FEAT_CTL_LMCE_ENABLED)) || 2207 (data & ~MCG_EXT_CTL_LMCE_EN)) 2208 return 1; 2209 vcpu->arch.mcg_ext_ctl = data; 2210 break; 2211 case MSR_IA32_FEAT_CTL: 2212 if (!vmx_feature_control_msr_valid(vcpu, data) || 2213 (to_vmx(vcpu)->msr_ia32_feature_control & 2214 FEAT_CTL_LOCKED && !msr_info->host_initiated)) 2215 return 1; 2216 vmx->msr_ia32_feature_control = data; 2217 if (msr_info->host_initiated && data == 0) 2218 vmx_leave_nested(vcpu); 2219 2220 /* SGX may be enabled/disabled by guest's firmware */ 2221 vmx_write_encls_bitmap(vcpu, NULL); 2222 break; 2223 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: 2224 /* 2225 * On real hardware, the LE hash MSRs are writable before 2226 * the firmware sets bit 0 in MSR 0x7a ("activating" SGX), 2227 * at which point SGX related bits in IA32_FEATURE_CONTROL 2228 * become writable. 2229 * 2230 * KVM does not emulate SGX activation for simplicity, so 2231 * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL 2232 * is unlocked. This is technically not architectural 2233 * behavior, but it's close enough. 
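 * In short: accept the write if it is host initiated, or if the guest
 * has SGX_LC and FEATURE_CONTROL is either unlocked or locked with
 * SGX_LC enabled.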
2234 */ 2235 if (!msr_info->host_initiated && 2236 (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) || 2237 ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) && 2238 !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED)))) 2239 return 1; 2240 vmx->msr_ia32_sgxlepubkeyhash 2241 [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data; 2242 break; 2243 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 2244 if (!msr_info->host_initiated) 2245 return 1; /* they are read-only */ 2246 if (!nested_vmx_allowed(vcpu)) 2247 return 1; 2248 return vmx_set_vmx_msr(vcpu, msr_index, data); 2249 case MSR_IA32_RTIT_CTL: 2250 if (!vmx_pt_mode_is_host_guest() || 2251 vmx_rtit_ctl_check(vcpu, data) || 2252 vmx->nested.vmxon) 2253 return 1; 2254 vmcs_write64(GUEST_IA32_RTIT_CTL, data); 2255 vmx->pt_desc.guest.ctl = data; 2256 pt_update_intercept_for_msr(vcpu); 2257 break; 2258 case MSR_IA32_RTIT_STATUS: 2259 if (!pt_can_write_msr(vmx)) 2260 return 1; 2261 if (data & MSR_IA32_RTIT_STATUS_MASK) 2262 return 1; 2263 vmx->pt_desc.guest.status = data; 2264 break; 2265 case MSR_IA32_RTIT_CR3_MATCH: 2266 if (!pt_can_write_msr(vmx)) 2267 return 1; 2268 if (!intel_pt_validate_cap(vmx->pt_desc.caps, 2269 PT_CAP_cr3_filtering)) 2270 return 1; 2271 vmx->pt_desc.guest.cr3_match = data; 2272 break; 2273 case MSR_IA32_RTIT_OUTPUT_BASE: 2274 if (!pt_can_write_msr(vmx)) 2275 return 1; 2276 if (!intel_pt_validate_cap(vmx->pt_desc.caps, 2277 PT_CAP_topa_output) && 2278 !intel_pt_validate_cap(vmx->pt_desc.caps, 2279 PT_CAP_single_range_output)) 2280 return 1; 2281 if (!pt_output_base_valid(vcpu, data)) 2282 return 1; 2283 vmx->pt_desc.guest.output_base = data; 2284 break; 2285 case MSR_IA32_RTIT_OUTPUT_MASK: 2286 if (!pt_can_write_msr(vmx)) 2287 return 1; 2288 if (!intel_pt_validate_cap(vmx->pt_desc.caps, 2289 PT_CAP_topa_output) && 2290 !intel_pt_validate_cap(vmx->pt_desc.caps, 2291 PT_CAP_single_range_output)) 2292 return 1; 2293 vmx->pt_desc.guest.output_mask = data; 2294 break; 2295 case MSR_IA32_RTIT_ADDR0_A ... 
MSR_IA32_RTIT_ADDR3_B: 2296 if (!pt_can_write_msr(vmx)) 2297 return 1; 2298 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; 2299 if (index >= 2 * vmx->pt_desc.num_address_ranges) 2300 return 1; 2301 if (is_noncanonical_address(data, vcpu)) 2302 return 1; 2303 if (index % 2) 2304 vmx->pt_desc.guest.addr_b[index / 2] = data; 2305 else 2306 vmx->pt_desc.guest.addr_a[index / 2] = data; 2307 break; 2308 case MSR_IA32_PERF_CAPABILITIES: 2309 if (data && !vcpu_to_pmu(vcpu)->version) 2310 return 1; 2311 if (data & PMU_CAP_LBR_FMT) { 2312 if ((data & PMU_CAP_LBR_FMT) != 2313 (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)) 2314 return 1; 2315 if (!intel_pmu_lbr_is_compatible(vcpu)) 2316 return 1; 2317 } 2318 ret = kvm_set_msr_common(vcpu, msr_info); 2319 break; 2320 2321 default: 2322 find_uret_msr: 2323 msr = vmx_find_uret_msr(vmx, msr_index); 2324 if (msr) 2325 ret = vmx_set_guest_uret_msr(vmx, msr, data); 2326 else 2327 ret = kvm_set_msr_common(vcpu, msr_info); 2328 } 2329 2330 /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */ 2331 if (msr_index == MSR_IA32_ARCH_CAPABILITIES) 2332 vmx_update_fb_clear_dis(vcpu, vmx); 2333 2334 return ret; 2335 } 2336 2337 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) 2338 { 2339 unsigned long guest_owned_bits; 2340 2341 kvm_register_mark_available(vcpu, reg); 2342 2343 switch (reg) { 2344 case VCPU_REGS_RSP: 2345 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); 2346 break; 2347 case VCPU_REGS_RIP: 2348 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); 2349 break; 2350 case VCPU_EXREG_PDPTR: 2351 if (enable_ept) 2352 ept_save_pdptrs(vcpu); 2353 break; 2354 case VCPU_EXREG_CR0: 2355 guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; 2356 2357 vcpu->arch.cr0 &= ~guest_owned_bits; 2358 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits; 2359 break; 2360 case VCPU_EXREG_CR3: 2361 /* 2362 * When intercepting CR3 loads, e.g. for shadowing paging, KVM's 2363 * CR3 is loaded into hardware, not the guest's CR3. 2364 */ 2365 if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING)) 2366 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 2367 break; 2368 case VCPU_EXREG_CR4: 2369 guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; 2370 2371 vcpu->arch.cr4 &= ~guest_owned_bits; 2372 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits; 2373 break; 2374 default: 2375 KVM_BUG_ON(1, vcpu->kvm); 2376 break; 2377 } 2378 } 2379 2380 static __init int cpu_has_kvm_support(void) 2381 { 2382 return cpu_has_vmx(); 2383 } 2384 2385 static __init int vmx_disabled_by_bios(void) 2386 { 2387 return !boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 2388 !boot_cpu_has(X86_FEATURE_VMX); 2389 } 2390 2391 static int kvm_cpu_vmxon(u64 vmxon_pointer) 2392 { 2393 u64 msr; 2394 2395 cr4_set_bits(X86_CR4_VMXE); 2396 2397 asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t" 2398 _ASM_EXTABLE(1b, %l[fault]) 2399 : : [vmxon_pointer] "m"(vmxon_pointer) 2400 : : fault); 2401 return 0; 2402 2403 fault: 2404 WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n", 2405 rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr); 2406 cr4_clear_bits(X86_CR4_VMXE); 2407 2408 return -EFAULT; 2409 } 2410 2411 static int vmx_hardware_enable(void) 2412 { 2413 int cpu = raw_smp_processor_id(); 2414 u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); 2415 int r; 2416 2417 if (cr4_read_shadow() & X86_CR4_VMXE) 2418 return -EBUSY; 2419 2420 /* 2421 * This can happen if we hot-added a CPU but failed to allocate 2422 * VP assist page for it. 
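 * In that case refuse to enable VMX on this CPU rather than run with
 * enlightened VMCS but no assist page.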
2423 */ 2424 if (static_branch_unlikely(&enable_evmcs) && 2425 !hv_get_vp_assist_page(cpu)) 2426 return -EFAULT; 2427 2428 intel_pt_handle_vmx(1); 2429 2430 r = kvm_cpu_vmxon(phys_addr); 2431 if (r) { 2432 intel_pt_handle_vmx(0); 2433 return r; 2434 } 2435 2436 if (enable_ept) 2437 ept_sync_global(); 2438 2439 return 0; 2440 } 2441 2442 static void vmclear_local_loaded_vmcss(void) 2443 { 2444 int cpu = raw_smp_processor_id(); 2445 struct loaded_vmcs *v, *n; 2446 2447 list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), 2448 loaded_vmcss_on_cpu_link) 2449 __loaded_vmcs_clear(v); 2450 } 2451 2452 static void vmx_hardware_disable(void) 2453 { 2454 vmclear_local_loaded_vmcss(); 2455 2456 if (cpu_vmxoff()) 2457 kvm_spurious_fault(); 2458 2459 intel_pt_handle_vmx(0); 2460 } 2461 2462 /* 2463 * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID 2464 * directly instead of going through cpu_has(), to ensure KVM is trapping 2465 * ENCLS whenever it's supported in hardware. It does not matter whether 2466 * the host OS supports or has enabled SGX. 2467 */ 2468 static bool cpu_has_sgx(void) 2469 { 2470 return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0)); 2471 } 2472 2473 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, 2474 u32 msr, u32 *result) 2475 { 2476 u32 vmx_msr_low, vmx_msr_high; 2477 u32 ctl = ctl_min | ctl_opt; 2478 2479 rdmsr(msr, vmx_msr_low, vmx_msr_high); 2480 2481 ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ 2482 ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ 2483 2484 /* Ensure minimum (required) set of control bits are supported. */ 2485 if (ctl_min & ~ctl) 2486 return -EIO; 2487 2488 *result = ctl; 2489 return 0; 2490 } 2491 2492 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, 2493 struct vmx_capability *vmx_cap) 2494 { 2495 u32 vmx_msr_low, vmx_msr_high; 2496 u32 min, opt, min2, opt2; 2497 u32 _pin_based_exec_control = 0; 2498 u32 _cpu_based_exec_control = 0; 2499 u32 _cpu_based_2nd_exec_control = 0; 2500 u32 _vmexit_control = 0; 2501 u32 _vmentry_control = 0; 2502 2503 memset(vmcs_conf, 0, sizeof(*vmcs_conf)); 2504 min = CPU_BASED_HLT_EXITING | 2505 #ifdef CONFIG_X86_64 2506 CPU_BASED_CR8_LOAD_EXITING | 2507 CPU_BASED_CR8_STORE_EXITING | 2508 #endif 2509 CPU_BASED_CR3_LOAD_EXITING | 2510 CPU_BASED_CR3_STORE_EXITING | 2511 CPU_BASED_UNCOND_IO_EXITING | 2512 CPU_BASED_MOV_DR_EXITING | 2513 CPU_BASED_USE_TSC_OFFSETTING | 2514 CPU_BASED_MWAIT_EXITING | 2515 CPU_BASED_MONITOR_EXITING | 2516 CPU_BASED_INVLPG_EXITING | 2517 CPU_BASED_RDPMC_EXITING; 2518 2519 opt = CPU_BASED_TPR_SHADOW | 2520 CPU_BASED_USE_MSR_BITMAPS | 2521 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 2522 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, 2523 &_cpu_based_exec_control) < 0) 2524 return -EIO; 2525 #ifdef CONFIG_X86_64 2526 if (_cpu_based_exec_control & CPU_BASED_TPR_SHADOW) 2527 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & 2528 ~CPU_BASED_CR8_STORE_EXITING; 2529 #endif 2530 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { 2531 min2 = 0; 2532 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2533 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2534 SECONDARY_EXEC_WBINVD_EXITING | 2535 SECONDARY_EXEC_ENABLE_VPID | 2536 SECONDARY_EXEC_ENABLE_EPT | 2537 SECONDARY_EXEC_UNRESTRICTED_GUEST | 2538 SECONDARY_EXEC_PAUSE_LOOP_EXITING | 2539 SECONDARY_EXEC_DESC | 2540 SECONDARY_EXEC_ENABLE_RDTSCP | 2541 SECONDARY_EXEC_ENABLE_INVPCID | 2542 SECONDARY_EXEC_APIC_REGISTER_VIRT | 
2543 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2544 SECONDARY_EXEC_SHADOW_VMCS | 2545 SECONDARY_EXEC_XSAVES | 2546 SECONDARY_EXEC_RDSEED_EXITING | 2547 SECONDARY_EXEC_RDRAND_EXITING | 2548 SECONDARY_EXEC_ENABLE_PML | 2549 SECONDARY_EXEC_TSC_SCALING | 2550 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2551 SECONDARY_EXEC_PT_USE_GPA | 2552 SECONDARY_EXEC_PT_CONCEAL_VMX | 2553 SECONDARY_EXEC_ENABLE_VMFUNC | 2554 SECONDARY_EXEC_BUS_LOCK_DETECTION; 2555 if (cpu_has_sgx()) 2556 opt2 |= SECONDARY_EXEC_ENCLS_EXITING; 2557 if (adjust_vmx_controls(min2, opt2, 2558 MSR_IA32_VMX_PROCBASED_CTLS2, 2559 &_cpu_based_2nd_exec_control) < 0) 2560 return -EIO; 2561 } 2562 #ifndef CONFIG_X86_64 2563 if (!(_cpu_based_2nd_exec_control & 2564 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 2565 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; 2566 #endif 2567 2568 if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) 2569 _cpu_based_2nd_exec_control &= ~( 2570 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2571 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2572 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 2573 2574 rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, 2575 &vmx_cap->ept, &vmx_cap->vpid); 2576 2577 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { 2578 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT 2579 enabled */ 2580 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | 2581 CPU_BASED_CR3_STORE_EXITING | 2582 CPU_BASED_INVLPG_EXITING); 2583 } else if (vmx_cap->ept) { 2584 vmx_cap->ept = 0; 2585 pr_warn_once("EPT CAP should not exist if not support " 2586 "1-setting enable EPT VM-execution control\n"); 2587 } 2588 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) && 2589 vmx_cap->vpid) { 2590 vmx_cap->vpid = 0; 2591 pr_warn_once("VPID CAP should not exist if not support " 2592 "1-setting enable VPID VM-execution control\n"); 2593 } 2594 2595 min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT; 2596 #ifdef CONFIG_X86_64 2597 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; 2598 #endif 2599 opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2600 VM_EXIT_LOAD_IA32_PAT | 2601 VM_EXIT_LOAD_IA32_EFER | 2602 VM_EXIT_CLEAR_BNDCFGS | 2603 VM_EXIT_PT_CONCEAL_PIP | 2604 VM_EXIT_CLEAR_IA32_RTIT_CTL; 2605 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, 2606 &_vmexit_control) < 0) 2607 return -EIO; 2608 2609 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; 2610 opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR | 2611 PIN_BASED_VMX_PREEMPTION_TIMER; 2612 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, 2613 &_pin_based_exec_control) < 0) 2614 return -EIO; 2615 2616 if (cpu_has_broken_vmx_preemption_timer()) 2617 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 2618 if (!(_cpu_based_2nd_exec_control & 2619 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)) 2620 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; 2621 2622 min = VM_ENTRY_LOAD_DEBUG_CONTROLS; 2623 opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | 2624 VM_ENTRY_LOAD_IA32_PAT | 2625 VM_ENTRY_LOAD_IA32_EFER | 2626 VM_ENTRY_LOAD_BNDCFGS | 2627 VM_ENTRY_PT_CONCEAL_PIP | 2628 VM_ENTRY_LOAD_IA32_RTIT_CTL; 2629 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, 2630 &_vmentry_control) < 0) 2631 return -EIO; 2632 2633 /* 2634 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they 2635 * can't be used due to an errata where VM Exit may incorrectly clear 2636 * IA32_PERF_GLOBAL_CTRL[34:32]. Workaround the errata by using the 2637 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL. 
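 * The affected parts are all family 6 (Nehalem/Westmere era) and are
 * matched by model number below; the relevant erratum IDs are noted
 * next to each model.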
2638 */ 2639 if (boot_cpu_data.x86 == 0x6) { 2640 switch (boot_cpu_data.x86_model) { 2641 case 26: /* AAK155 */ 2642 case 30: /* AAP115 */ 2643 case 37: /* AAT100 */ 2644 case 44: /* BC86,AAY89,BD102 */ 2645 case 46: /* BA97 */ 2646 _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 2647 _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 2648 pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " 2649 "does not work properly. Using workaround\n"); 2650 break; 2651 default: 2652 break; 2653 } 2654 } 2655 2656 2657 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); 2658 2659 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ 2660 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) 2661 return -EIO; 2662 2663 #ifdef CONFIG_X86_64 2664 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ 2665 if (vmx_msr_high & (1u<<16)) 2666 return -EIO; 2667 #endif 2668 2669 /* Require Write-Back (WB) memory type for VMCS accesses. */ 2670 if (((vmx_msr_high >> 18) & 15) != 6) 2671 return -EIO; 2672 2673 vmcs_conf->size = vmx_msr_high & 0x1fff; 2674 vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; 2675 2676 vmcs_conf->revision_id = vmx_msr_low; 2677 2678 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; 2679 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; 2680 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; 2681 vmcs_conf->vmexit_ctrl = _vmexit_control; 2682 vmcs_conf->vmentry_ctrl = _vmentry_control; 2683 2684 #if IS_ENABLED(CONFIG_HYPERV) 2685 if (enlightened_vmcs) 2686 evmcs_sanitize_exec_ctrls(vmcs_conf); 2687 #endif 2688 2689 return 0; 2690 } 2691 2692 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) 2693 { 2694 int node = cpu_to_node(cpu); 2695 struct page *pages; 2696 struct vmcs *vmcs; 2697 2698 pages = __alloc_pages_node(node, flags, 0); 2699 if (!pages) 2700 return NULL; 2701 vmcs = page_address(pages); 2702 memset(vmcs, 0, vmcs_config.size); 2703 2704 /* KVM supports Enlightened VMCS v1 only */ 2705 if (static_branch_unlikely(&enable_evmcs)) 2706 vmcs->hdr.revision_id = KVM_EVMCS_VERSION; 2707 else 2708 vmcs->hdr.revision_id = vmcs_config.revision_id; 2709 2710 if (shadow) 2711 vmcs->hdr.shadow_vmcs = 1; 2712 return vmcs; 2713 } 2714 2715 void free_vmcs(struct vmcs *vmcs) 2716 { 2717 free_page((unsigned long)vmcs); 2718 } 2719 2720 /* 2721 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded 2722 */ 2723 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) 2724 { 2725 if (!loaded_vmcs->vmcs) 2726 return; 2727 loaded_vmcs_clear(loaded_vmcs); 2728 free_vmcs(loaded_vmcs->vmcs); 2729 loaded_vmcs->vmcs = NULL; 2730 if (loaded_vmcs->msr_bitmap) 2731 free_page((unsigned long)loaded_vmcs->msr_bitmap); 2732 WARN_ON(loaded_vmcs->shadow_vmcs != NULL); 2733 } 2734 2735 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) 2736 { 2737 loaded_vmcs->vmcs = alloc_vmcs(false); 2738 if (!loaded_vmcs->vmcs) 2739 return -ENOMEM; 2740 2741 vmcs_clear(loaded_vmcs->vmcs); 2742 2743 loaded_vmcs->shadow_vmcs = NULL; 2744 loaded_vmcs->hv_timer_soft_disabled = false; 2745 loaded_vmcs->cpu = -1; 2746 loaded_vmcs->launched = 0; 2747 2748 if (cpu_has_vmx_msr_bitmap()) { 2749 loaded_vmcs->msr_bitmap = (unsigned long *) 2750 __get_free_page(GFP_KERNEL_ACCOUNT); 2751 if (!loaded_vmcs->msr_bitmap) 2752 goto out_vmcs; 2753 memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); 2754 } 2755 2756 memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); 2757 memset(&loaded_vmcs->controls_shadow, 0, 2758 sizeof(struct 
vmcs_controls_shadow)); 2759 2760 return 0; 2761 2762 out_vmcs: 2763 free_loaded_vmcs(loaded_vmcs); 2764 return -ENOMEM; 2765 } 2766 2767 static void free_kvm_area(void) 2768 { 2769 int cpu; 2770 2771 for_each_possible_cpu(cpu) { 2772 free_vmcs(per_cpu(vmxarea, cpu)); 2773 per_cpu(vmxarea, cpu) = NULL; 2774 } 2775 } 2776 2777 static __init int alloc_kvm_area(void) 2778 { 2779 int cpu; 2780 2781 for_each_possible_cpu(cpu) { 2782 struct vmcs *vmcs; 2783 2784 vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL); 2785 if (!vmcs) { 2786 free_kvm_area(); 2787 return -ENOMEM; 2788 } 2789 2790 /* 2791 * When eVMCS is enabled, alloc_vmcs_cpu() sets 2792 * vmcs->revision_id to KVM_EVMCS_VERSION instead of 2793 * revision_id reported by MSR_IA32_VMX_BASIC. 2794 * 2795 * However, even though not explicitly documented by 2796 * TLFS, VMXArea passed as VMXON argument should 2797 * still be marked with revision_id reported by 2798 * physical CPU. 2799 */ 2800 if (static_branch_unlikely(&enable_evmcs)) 2801 vmcs->hdr.revision_id = vmcs_config.revision_id; 2802 2803 per_cpu(vmxarea, cpu) = vmcs; 2804 } 2805 return 0; 2806 } 2807 2808 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, 2809 struct kvm_segment *save) 2810 { 2811 if (!emulate_invalid_guest_state) { 2812 /* 2813 * CS and SS RPL should be equal during guest entry according 2814 * to VMX spec, but in reality it is not always so. Since vcpu 2815 * is in the middle of the transition from real mode to 2816 * protected mode it is safe to assume that RPL 0 is a good 2817 * default value. 2818 */ 2819 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) 2820 save->selector &= ~SEGMENT_RPL_MASK; 2821 save->dpl = save->selector & SEGMENT_RPL_MASK; 2822 save->s = 1; 2823 } 2824 __vmx_set_segment(vcpu, save, seg); 2825 } 2826 2827 static void enter_pmode(struct kvm_vcpu *vcpu) 2828 { 2829 unsigned long flags; 2830 struct vcpu_vmx *vmx = to_vmx(vcpu); 2831 2832 /* 2833 * Update real mode segment cache. It may be not up-to-date if segment 2834 * register was written while vcpu was in a guest mode. 
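 * Refresh the cached segments below before vm86_active is cleared and
 * the protected-mode fixups are applied.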
2835 */ 2836 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 2837 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 2838 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 2839 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 2840 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 2841 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 2842 2843 vmx->rmode.vm86_active = 0; 2844 2845 __vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 2846 2847 flags = vmcs_readl(GUEST_RFLAGS); 2848 flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; 2849 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; 2850 vmcs_writel(GUEST_RFLAGS, flags); 2851 2852 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | 2853 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); 2854 2855 vmx_update_exception_bitmap(vcpu); 2856 2857 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 2858 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 2859 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 2860 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 2861 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 2862 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 2863 } 2864 2865 static void fix_rmode_seg(int seg, struct kvm_segment *save) 2866 { 2867 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 2868 struct kvm_segment var = *save; 2869 2870 var.dpl = 0x3; 2871 if (seg == VCPU_SREG_CS) 2872 var.type = 0x3; 2873 2874 if (!emulate_invalid_guest_state) { 2875 var.selector = var.base >> 4; 2876 var.base = var.base & 0xffff0; 2877 var.limit = 0xffff; 2878 var.g = 0; 2879 var.db = 0; 2880 var.present = 1; 2881 var.s = 1; 2882 var.l = 0; 2883 var.unusable = 0; 2884 var.type = 0x3; 2885 var.avl = 0; 2886 if (save->base & 0xf) 2887 printk_once(KERN_WARNING "kvm: segment base is not " 2888 "paragraph aligned when entering " 2889 "protected mode (seg=%d)", seg); 2890 } 2891 2892 vmcs_write16(sf->selector, var.selector); 2893 vmcs_writel(sf->base, var.base); 2894 vmcs_write32(sf->limit, var.limit); 2895 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); 2896 } 2897 2898 static void enter_rmode(struct kvm_vcpu *vcpu) 2899 { 2900 unsigned long flags; 2901 struct vcpu_vmx *vmx = to_vmx(vcpu); 2902 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); 2903 2904 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 2905 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 2906 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 2907 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 2908 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 2909 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 2910 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 2911 2912 vmx->rmode.vm86_active = 1; 2913 2914 /* 2915 * Very old userspace does not call KVM_SET_TSS_ADDR before entering 2916 * vcpu. Warn the user that an update is overdue. 
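 * The address set via the KVM_SET_TSS_ADDR ioctl backs the real-mode TR
 * that is programmed into GUEST_TR_BASE below.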
2917 */ 2918 if (!kvm_vmx->tss_addr) 2919 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be " 2920 "called before entering vcpu\n"); 2921 2922 vmx_segment_cache_clear(vmx); 2923 2924 vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr); 2925 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); 2926 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 2927 2928 flags = vmcs_readl(GUEST_RFLAGS); 2929 vmx->rmode.save_rflags = flags; 2930 2931 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 2932 2933 vmcs_writel(GUEST_RFLAGS, flags); 2934 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); 2935 vmx_update_exception_bitmap(vcpu); 2936 2937 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 2938 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 2939 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 2940 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 2941 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 2942 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 2943 } 2944 2945 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) 2946 { 2947 struct vcpu_vmx *vmx = to_vmx(vcpu); 2948 2949 /* Nothing to do if hardware doesn't support EFER. */ 2950 if (!vmx_find_uret_msr(vmx, MSR_EFER)) 2951 return 0; 2952 2953 vcpu->arch.efer = efer; 2954 if (efer & EFER_LMA) 2955 vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE); 2956 else 2957 vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE); 2958 2959 vmx_setup_uret_msrs(vmx); 2960 return 0; 2961 } 2962 2963 #ifdef CONFIG_X86_64 2964 2965 static void enter_lmode(struct kvm_vcpu *vcpu) 2966 { 2967 u32 guest_tr_ar; 2968 2969 vmx_segment_cache_clear(to_vmx(vcpu)); 2970 2971 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); 2972 if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { 2973 pr_debug_ratelimited("%s: tss fixup for long mode. \n", 2974 __func__); 2975 vmcs_write32(GUEST_TR_AR_BYTES, 2976 (guest_tr_ar & ~VMX_AR_TYPE_MASK) 2977 | VMX_AR_TYPE_BUSY_64_TSS); 2978 } 2979 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); 2980 } 2981 2982 static void exit_lmode(struct kvm_vcpu *vcpu) 2983 { 2984 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); 2985 } 2986 2987 #endif 2988 2989 static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu) 2990 { 2991 struct vcpu_vmx *vmx = to_vmx(vcpu); 2992 2993 /* 2994 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as 2995 * the CPU is not required to invalidate guest-physical mappings on 2996 * VM-Entry, even if VPID is disabled. Guest-physical mappings are 2997 * associated with the root EPT structure and not any particular VPID 2998 * (INVVPID also isn't required to invalidate guest-physical mappings). 2999 */ 3000 if (enable_ept) { 3001 ept_sync_global(); 3002 } else if (enable_vpid) { 3003 if (cpu_has_vmx_invvpid_global()) { 3004 vpid_sync_vcpu_global(); 3005 } else { 3006 vpid_sync_vcpu_single(vmx->vpid); 3007 vpid_sync_vcpu_single(vmx->nested.vpid02); 3008 } 3009 } 3010 } 3011 3012 static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu) 3013 { 3014 if (is_guest_mode(vcpu)) 3015 return nested_get_vpid02(vcpu); 3016 return to_vmx(vcpu)->vpid; 3017 } 3018 3019 static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu) 3020 { 3021 struct kvm_mmu *mmu = vcpu->arch.mmu; 3022 u64 root_hpa = mmu->root.hpa; 3023 3024 /* No flush required if the current context is invalid. 
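A newly allocated root is flushed when it is loaded (see kvm_mmu_load()).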
*/ 3025 if (!VALID_PAGE(root_hpa)) 3026 return; 3027 3028 if (enable_ept) 3029 ept_sync_context(construct_eptp(vcpu, root_hpa, 3030 mmu->root_role.level)); 3031 else 3032 vpid_sync_context(vmx_get_current_vpid(vcpu)); 3033 } 3034 3035 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr) 3036 { 3037 /* 3038 * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in 3039 * vmx_flush_tlb_guest() for an explanation of why this is ok. 3040 */ 3041 vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr); 3042 } 3043 3044 static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu) 3045 { 3046 /* 3047 * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a 3048 * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are 3049 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is 3050 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed), 3051 * i.e. no explicit INVVPID is necessary. 3052 */ 3053 vpid_sync_context(vmx_get_current_vpid(vcpu)); 3054 } 3055 3056 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu) 3057 { 3058 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 3059 3060 if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR)) 3061 return; 3062 3063 if (is_pae_paging(vcpu)) { 3064 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); 3065 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); 3066 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); 3067 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); 3068 } 3069 } 3070 3071 void ept_save_pdptrs(struct kvm_vcpu *vcpu) 3072 { 3073 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 3074 3075 if (WARN_ON_ONCE(!is_pae_paging(vcpu))) 3076 return; 3077 3078 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); 3079 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 3080 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); 3081 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); 3082 3083 kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR); 3084 } 3085 3086 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \ 3087 CPU_BASED_CR3_STORE_EXITING) 3088 3089 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 3090 { 3091 struct vcpu_vmx *vmx = to_vmx(vcpu); 3092 unsigned long hw_cr0, old_cr0_pg; 3093 u32 tmp; 3094 3095 old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG); 3096 3097 hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); 3098 if (is_unrestricted_guest(vcpu)) 3099 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; 3100 else { 3101 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; 3102 if (!enable_ept) 3103 hw_cr0 |= X86_CR0_WP; 3104 3105 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) 3106 enter_pmode(vcpu); 3107 3108 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) 3109 enter_rmode(vcpu); 3110 } 3111 3112 vmcs_writel(CR0_READ_SHADOW, cr0); 3113 vmcs_writel(GUEST_CR0, hw_cr0); 3114 vcpu->arch.cr0 = cr0; 3115 kvm_register_mark_available(vcpu, VCPU_EXREG_CR0); 3116 3117 #ifdef CONFIG_X86_64 3118 if (vcpu->arch.efer & EFER_LME) { 3119 if (!old_cr0_pg && (cr0 & X86_CR0_PG)) 3120 enter_lmode(vcpu); 3121 else if (old_cr0_pg && !(cr0 & X86_CR0_PG)) 3122 exit_lmode(vcpu); 3123 } 3124 #endif 3125 3126 if (enable_ept && !is_unrestricted_guest(vcpu)) { 3127 /* 3128 * Ensure KVM has an up-to-date snapshot of the guest's CR3. If 3129 * the below code _enables_ CR3 exiting, vmx_cache_reg() will 3130 * (correctly) stop reading vmcs.GUEST_CR3 because it thinks 3131 * KVM's CR3 is installed. 
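 * Grab the snapshot below while reads of vmcs.GUEST_CR3 are still valid.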
3132 */ 3133 if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) 3134 vmx_cache_reg(vcpu, VCPU_EXREG_CR3); 3135 3136 /* 3137 * When running with EPT but not unrestricted guest, KVM must 3138 * intercept CR3 accesses when paging is _disabled_. This is 3139 * necessary because restricted guests can't actually run with 3140 * paging disabled, and so KVM stuffs its own CR3 in order to 3141 * run the guest when identity mapped page tables. 3142 * 3143 * Do _NOT_ check the old CR0.PG, e.g. to optimize away the 3144 * update, it may be stale with respect to CR3 interception, 3145 * e.g. after nested VM-Enter. 3146 * 3147 * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or 3148 * stores to forward them to L1, even if KVM does not need to 3149 * intercept them to preserve its identity mapped page tables. 3150 */ 3151 if (!(cr0 & X86_CR0_PG)) { 3152 exec_controls_setbit(vmx, CR3_EXITING_BITS); 3153 } else if (!is_guest_mode(vcpu)) { 3154 exec_controls_clearbit(vmx, CR3_EXITING_BITS); 3155 } else { 3156 tmp = exec_controls_get(vmx); 3157 tmp &= ~CR3_EXITING_BITS; 3158 tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS; 3159 exec_controls_set(vmx, tmp); 3160 } 3161 3162 /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */ 3163 if ((old_cr0_pg ^ cr0) & X86_CR0_PG) 3164 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); 3165 3166 /* 3167 * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but 3168 * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG. 3169 */ 3170 if (!(old_cr0_pg & X86_CR0_PG) && (cr0 & X86_CR0_PG)) 3171 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 3172 } 3173 3174 /* depends on vcpu->arch.cr0 to be set to a new value */ 3175 vmx->emulation_required = vmx_emulation_required(vcpu); 3176 } 3177 3178 static int vmx_get_max_tdp_level(void) 3179 { 3180 if (cpu_has_vmx_ept_5levels()) 3181 return 5; 3182 return 4; 3183 } 3184 3185 u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) 3186 { 3187 u64 eptp = VMX_EPTP_MT_WB; 3188 3189 eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; 3190 3191 if (enable_ept_ad_bits && 3192 (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) 3193 eptp |= VMX_EPTP_AD_ENABLE_BIT; 3194 eptp |= root_hpa; 3195 3196 return eptp; 3197 } 3198 3199 static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, 3200 int root_level) 3201 { 3202 struct kvm *kvm = vcpu->kvm; 3203 bool update_guest_cr3 = true; 3204 unsigned long guest_cr3; 3205 u64 eptp; 3206 3207 if (enable_ept) { 3208 eptp = construct_eptp(vcpu, root_hpa, root_level); 3209 vmcs_write64(EPT_POINTER, eptp); 3210 3211 hv_track_root_tdp(vcpu, root_hpa); 3212 3213 if (!enable_unrestricted_guest && !is_paging(vcpu)) 3214 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; 3215 else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3)) 3216 guest_cr3 = vcpu->arch.cr3; 3217 else /* vmcs.GUEST_CR3 is already up-to-date. */ 3218 update_guest_cr3 = false; 3219 vmx_ept_load_pdptrs(vcpu); 3220 } else { 3221 guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu); 3222 } 3223 3224 if (update_guest_cr3) 3225 vmcs_writel(GUEST_CR3, guest_cr3); 3226 } 3227 3228 3229 static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 3230 { 3231 /* 3232 * We operate under the default treatment of SMM, so VMX cannot be 3233 * enabled under SMM. Note, whether or not VMXE is allowed at all is 3234 * handled by kvm_is_valid_cr4(). 
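 * Only the VMX-specific restrictions are checked here: VMXE while in
 * SMM, and CR4 values that violate the nested VMX constraints while
 * VMXON is active.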
3235 */ 3236 if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu)) 3237 return false; 3238 3239 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) 3240 return false; 3241 3242 return true; 3243 } 3244 3245 void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 3246 { 3247 unsigned long old_cr4 = vcpu->arch.cr4; 3248 struct vcpu_vmx *vmx = to_vmx(vcpu); 3249 /* 3250 * Pass through host's Machine Check Enable value to hw_cr4, which 3251 * is in force while we are in guest mode. Do not let guests control 3252 * this bit, even if host CR4.MCE == 0. 3253 */ 3254 unsigned long hw_cr4; 3255 3256 hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE); 3257 if (is_unrestricted_guest(vcpu)) 3258 hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST; 3259 else if (vmx->rmode.vm86_active) 3260 hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON; 3261 else 3262 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON; 3263 3264 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) { 3265 if (cr4 & X86_CR4_UMIP) { 3266 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC); 3267 hw_cr4 &= ~X86_CR4_UMIP; 3268 } else if (!is_guest_mode(vcpu) || 3269 !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) { 3270 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC); 3271 } 3272 } 3273 3274 vcpu->arch.cr4 = cr4; 3275 kvm_register_mark_available(vcpu, VCPU_EXREG_CR4); 3276 3277 if (!is_unrestricted_guest(vcpu)) { 3278 if (enable_ept) { 3279 if (!is_paging(vcpu)) { 3280 hw_cr4 &= ~X86_CR4_PAE; 3281 hw_cr4 |= X86_CR4_PSE; 3282 } else if (!(cr4 & X86_CR4_PAE)) { 3283 hw_cr4 &= ~X86_CR4_PAE; 3284 } 3285 } 3286 3287 /* 3288 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in 3289 * hardware. To emulate this behavior, SMEP/SMAP/PKU needs 3290 * to be manually disabled when guest switches to non-paging 3291 * mode. 3292 * 3293 * If !enable_unrestricted_guest, the CPU is always running 3294 * with CR0.PG=1 and CR4 needs to be modified. 3295 * If enable_unrestricted_guest, the CPU automatically 3296 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. 3297 */ 3298 if (!is_paging(vcpu)) 3299 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); 3300 } 3301 3302 vmcs_writel(CR4_READ_SHADOW, cr4); 3303 vmcs_writel(GUEST_CR4, hw_cr4); 3304 3305 if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE)) 3306 kvm_update_cpuid_runtime(vcpu); 3307 } 3308 3309 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3310 { 3311 struct vcpu_vmx *vmx = to_vmx(vcpu); 3312 u32 ar; 3313 3314 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 3315 *var = vmx->rmode.segs[seg]; 3316 if (seg == VCPU_SREG_TR 3317 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) 3318 return; 3319 var->base = vmx_read_guest_seg_base(vmx, seg); 3320 var->selector = vmx_read_guest_seg_selector(vmx, seg); 3321 return; 3322 } 3323 var->base = vmx_read_guest_seg_base(vmx, seg); 3324 var->limit = vmx_read_guest_seg_limit(vmx, seg); 3325 var->selector = vmx_read_guest_seg_selector(vmx, seg); 3326 ar = vmx_read_guest_seg_ar(vmx, seg); 3327 var->unusable = (ar >> 16) & 1; 3328 var->type = ar & 15; 3329 var->s = (ar >> 4) & 1; 3330 var->dpl = (ar >> 5) & 3; 3331 /* 3332 * Some userspaces do not preserve unusable property. Since usable 3333 * segment has to be present according to VMX spec we can use present 3334 * property to amend userspace bug by making unusable segment always 3335 * nonpresent. vmx_segment_access_rights() already marks nonpresent 3336 * segment as unusable. 
3337 */ 3338 var->present = !var->unusable; 3339 var->avl = (ar >> 12) & 1; 3340 var->l = (ar >> 13) & 1; 3341 var->db = (ar >> 14) & 1; 3342 var->g = (ar >> 15) & 1; 3343 } 3344 3345 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) 3346 { 3347 struct kvm_segment s; 3348 3349 if (to_vmx(vcpu)->rmode.vm86_active) { 3350 vmx_get_segment(vcpu, &s, seg); 3351 return s.base; 3352 } 3353 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); 3354 } 3355 3356 int vmx_get_cpl(struct kvm_vcpu *vcpu) 3357 { 3358 struct vcpu_vmx *vmx = to_vmx(vcpu); 3359 3360 if (unlikely(vmx->rmode.vm86_active)) 3361 return 0; 3362 else { 3363 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); 3364 return VMX_AR_DPL(ar); 3365 } 3366 } 3367 3368 static u32 vmx_segment_access_rights(struct kvm_segment *var) 3369 { 3370 u32 ar; 3371 3372 if (var->unusable || !var->present) 3373 ar = 1 << 16; 3374 else { 3375 ar = var->type & 15; 3376 ar |= (var->s & 1) << 4; 3377 ar |= (var->dpl & 3) << 5; 3378 ar |= (var->present & 1) << 7; 3379 ar |= (var->avl & 1) << 12; 3380 ar |= (var->l & 1) << 13; 3381 ar |= (var->db & 1) << 14; 3382 ar |= (var->g & 1) << 15; 3383 } 3384 3385 return ar; 3386 } 3387 3388 void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3389 { 3390 struct vcpu_vmx *vmx = to_vmx(vcpu); 3391 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 3392 3393 vmx_segment_cache_clear(vmx); 3394 3395 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 3396 vmx->rmode.segs[seg] = *var; 3397 if (seg == VCPU_SREG_TR) 3398 vmcs_write16(sf->selector, var->selector); 3399 else if (var->s) 3400 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); 3401 return; 3402 } 3403 3404 vmcs_writel(sf->base, var->base); 3405 vmcs_write32(sf->limit, var->limit); 3406 vmcs_write16(sf->selector, var->selector); 3407 3408 /* 3409 * Fix the "Accessed" bit in AR field of segment registers for older 3410 * qemu binaries. 3411 * IA32 arch specifies that at the time of processor reset the 3412 * "Accessed" bit in the AR field of segment registers is 1. And qemu 3413 * is setting it to 0 in the userland code. This causes invalid guest 3414 * state vmexit when "unrestricted guest" mode is turned on. 3415 * Fix for this setup issue in cpu_reset is being pushed in the qemu 3416 * tree. Newer qemu binaries with that qemu fix would not need this 3417 * kvm hack. 
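 * (Bit 0 of the segment type is the Accessed bit, hence the "|= 0x1"
 * below.)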
3418 */ 3419 if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR)) 3420 var->type |= 0x1; /* Accessed */ 3421 3422 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); 3423 } 3424 3425 static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3426 { 3427 __vmx_set_segment(vcpu, var, seg); 3428 3429 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu); 3430 } 3431 3432 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 3433 { 3434 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); 3435 3436 *db = (ar >> 14) & 1; 3437 *l = (ar >> 13) & 1; 3438 } 3439 3440 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3441 { 3442 dt->size = vmcs_read32(GUEST_IDTR_LIMIT); 3443 dt->address = vmcs_readl(GUEST_IDTR_BASE); 3444 } 3445 3446 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3447 { 3448 vmcs_write32(GUEST_IDTR_LIMIT, dt->size); 3449 vmcs_writel(GUEST_IDTR_BASE, dt->address); 3450 } 3451 3452 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3453 { 3454 dt->size = vmcs_read32(GUEST_GDTR_LIMIT); 3455 dt->address = vmcs_readl(GUEST_GDTR_BASE); 3456 } 3457 3458 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3459 { 3460 vmcs_write32(GUEST_GDTR_LIMIT, dt->size); 3461 vmcs_writel(GUEST_GDTR_BASE, dt->address); 3462 } 3463 3464 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) 3465 { 3466 struct kvm_segment var; 3467 u32 ar; 3468 3469 vmx_get_segment(vcpu, &var, seg); 3470 var.dpl = 0x3; 3471 if (seg == VCPU_SREG_CS) 3472 var.type = 0x3; 3473 ar = vmx_segment_access_rights(&var); 3474 3475 if (var.base != (var.selector << 4)) 3476 return false; 3477 if (var.limit != 0xffff) 3478 return false; 3479 if (ar != 0xf3) 3480 return false; 3481 3482 return true; 3483 } 3484 3485 static bool code_segment_valid(struct kvm_vcpu *vcpu) 3486 { 3487 struct kvm_segment cs; 3488 unsigned int cs_rpl; 3489 3490 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 3491 cs_rpl = cs.selector & SEGMENT_RPL_MASK; 3492 3493 if (cs.unusable) 3494 return false; 3495 if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) 3496 return false; 3497 if (!cs.s) 3498 return false; 3499 if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { 3500 if (cs.dpl > cs_rpl) 3501 return false; 3502 } else { 3503 if (cs.dpl != cs_rpl) 3504 return false; 3505 } 3506 if (!cs.present) 3507 return false; 3508 3509 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ 3510 return true; 3511 } 3512 3513 static bool stack_segment_valid(struct kvm_vcpu *vcpu) 3514 { 3515 struct kvm_segment ss; 3516 unsigned int ss_rpl; 3517 3518 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 3519 ss_rpl = ss.selector & SEGMENT_RPL_MASK; 3520 3521 if (ss.unusable) 3522 return true; 3523 if (ss.type != 3 && ss.type != 7) 3524 return false; 3525 if (!ss.s) 3526 return false; 3527 if (ss.dpl != ss_rpl) /* DPL != RPL */ 3528 return false; 3529 if (!ss.present) 3530 return false; 3531 3532 return true; 3533 } 3534 3535 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) 3536 { 3537 struct kvm_segment var; 3538 unsigned int rpl; 3539 3540 vmx_get_segment(vcpu, &var, seg); 3541 rpl = var.selector & SEGMENT_RPL_MASK; 3542 3543 if (var.unusable) 3544 return true; 3545 if (!var.s) 3546 return false; 3547 if (!var.present) 3548 return false; 3549 if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { 3550 if (var.dpl < rpl) /* DPL < RPL */ 3551 return false; 
3552 } 3553 3554 /* TODO: Add other members to kvm_segment_field to allow checking for other access 3555 * rights flags 3556 */ 3557 return true; 3558 } 3559 3560 static bool tr_valid(struct kvm_vcpu *vcpu) 3561 { 3562 struct kvm_segment tr; 3563 3564 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); 3565 3566 if (tr.unusable) 3567 return false; 3568 if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 3569 return false; 3570 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ 3571 return false; 3572 if (!tr.present) 3573 return false; 3574 3575 return true; 3576 } 3577 3578 static bool ldtr_valid(struct kvm_vcpu *vcpu) 3579 { 3580 struct kvm_segment ldtr; 3581 3582 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); 3583 3584 if (ldtr.unusable) 3585 return true; 3586 if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 3587 return false; 3588 if (ldtr.type != 2) 3589 return false; 3590 if (!ldtr.present) 3591 return false; 3592 3593 return true; 3594 } 3595 3596 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) 3597 { 3598 struct kvm_segment cs, ss; 3599 3600 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 3601 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 3602 3603 return ((cs.selector & SEGMENT_RPL_MASK) == 3604 (ss.selector & SEGMENT_RPL_MASK)); 3605 } 3606 3607 /* 3608 * Check if guest state is valid. Returns true if valid, false if 3609 * not. 3610 * We assume that registers are always usable 3611 */ 3612 bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu) 3613 { 3614 /* real mode guest state checks */ 3615 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { 3616 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) 3617 return false; 3618 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) 3619 return false; 3620 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) 3621 return false; 3622 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) 3623 return false; 3624 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) 3625 return false; 3626 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) 3627 return false; 3628 } else { 3629 /* protected mode guest state checks */ 3630 if (!cs_ss_rpl_check(vcpu)) 3631 return false; 3632 if (!code_segment_valid(vcpu)) 3633 return false; 3634 if (!stack_segment_valid(vcpu)) 3635 return false; 3636 if (!data_segment_valid(vcpu, VCPU_SREG_DS)) 3637 return false; 3638 if (!data_segment_valid(vcpu, VCPU_SREG_ES)) 3639 return false; 3640 if (!data_segment_valid(vcpu, VCPU_SREG_FS)) 3641 return false; 3642 if (!data_segment_valid(vcpu, VCPU_SREG_GS)) 3643 return false; 3644 if (!tr_valid(vcpu)) 3645 return false; 3646 if (!ldtr_valid(vcpu)) 3647 return false; 3648 } 3649 /* TODO: 3650 * - Add checks on RIP 3651 * - Add checks on RFLAGS 3652 */ 3653 3654 return true; 3655 } 3656 3657 static int init_rmode_tss(struct kvm *kvm, void __user *ua) 3658 { 3659 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 3660 u16 data; 3661 int i; 3662 3663 for (i = 0; i < 3; i++) { 3664 if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE)) 3665 return -EFAULT; 3666 } 3667 3668 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; 3669 if (__copy_to_user(ua + TSS_IOPB_BASE_OFFSET, &data, sizeof(u16))) 3670 return -EFAULT; 3671 3672 data = ~0; 3673 if (__copy_to_user(ua + RMODE_TSS_SIZE - 1, &data, sizeof(u8))) 3674 return -EFAULT; 3675 3676 return 0; 3677 } 3678 3679 static int init_rmode_identity_map(struct kvm *kvm) 3680 { 3681 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm); 3682 int i, r = 0; 3683 void __user *uaddr; 3684 u32 tmp; 3685 3686 /* Protect 
kvm_vmx->ept_identity_pagetable_done. */ 3687 mutex_lock(&kvm->slots_lock); 3688 3689 if (likely(kvm_vmx->ept_identity_pagetable_done)) 3690 goto out; 3691 3692 if (!kvm_vmx->ept_identity_map_addr) 3693 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; 3694 3695 uaddr = __x86_set_memory_region(kvm, 3696 IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 3697 kvm_vmx->ept_identity_map_addr, 3698 PAGE_SIZE); 3699 if (IS_ERR(uaddr)) { 3700 r = PTR_ERR(uaddr); 3701 goto out; 3702 } 3703 3704 /* Set up identity-mapping pagetable for EPT in real mode */ 3705 for (i = 0; i < PT32_ENT_PER_PAGE; i++) { 3706 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | 3707 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); 3708 if (__copy_to_user(uaddr + i * sizeof(tmp), &tmp, sizeof(tmp))) { 3709 r = -EFAULT; 3710 goto out; 3711 } 3712 } 3713 kvm_vmx->ept_identity_pagetable_done = true; 3714 3715 out: 3716 mutex_unlock(&kvm->slots_lock); 3717 return r; 3718 } 3719 3720 static void seg_setup(int seg) 3721 { 3722 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 3723 unsigned int ar; 3724 3725 vmcs_write16(sf->selector, 0); 3726 vmcs_writel(sf->base, 0); 3727 vmcs_write32(sf->limit, 0xffff); 3728 ar = 0x93; 3729 if (seg == VCPU_SREG_CS) 3730 ar |= 0x08; /* code segment */ 3731 3732 vmcs_write32(sf->ar_bytes, ar); 3733 } 3734 3735 static int alloc_apic_access_page(struct kvm *kvm) 3736 { 3737 struct page *page; 3738 void __user *hva; 3739 int ret = 0; 3740 3741 mutex_lock(&kvm->slots_lock); 3742 if (kvm->arch.apic_access_memslot_enabled) 3743 goto out; 3744 hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 3745 APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); 3746 if (IS_ERR(hva)) { 3747 ret = PTR_ERR(hva); 3748 goto out; 3749 } 3750 3751 page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 3752 if (is_error_page(page)) { 3753 ret = -EFAULT; 3754 goto out; 3755 } 3756 3757 /* 3758 * Do not pin the page in memory, so that memory hot-unplug 3759 * is able to migrate it. 3760 */ 3761 put_page(page); 3762 kvm->arch.apic_access_memslot_enabled = true; 3763 out: 3764 mutex_unlock(&kvm->slots_lock); 3765 return ret; 3766 } 3767 3768 int allocate_vpid(void) 3769 { 3770 int vpid; 3771 3772 if (!enable_vpid) 3773 return 0; 3774 spin_lock(&vmx_vpid_lock); 3775 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); 3776 if (vpid < VMX_NR_VPIDS) 3777 __set_bit(vpid, vmx_vpid_bitmap); 3778 else 3779 vpid = 0; 3780 spin_unlock(&vmx_vpid_lock); 3781 return vpid; 3782 } 3783 3784 void free_vpid(int vpid) 3785 { 3786 if (!enable_vpid || vpid == 0) 3787 return; 3788 spin_lock(&vmx_vpid_lock); 3789 __clear_bit(vpid, vmx_vpid_bitmap); 3790 spin_unlock(&vmx_vpid_lock); 3791 } 3792 3793 static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx) 3794 { 3795 /* 3796 * When KVM is a nested hypervisor on top of Hyper-V and uses 3797 * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR 3798 * bitmap has changed. 3799 */ 3800 if (static_branch_unlikely(&enable_evmcs)) 3801 evmcs_touch_msr_bitmap(); 3802 3803 vmx->nested.force_msr_bitmap_recalc = true; 3804 } 3805 3806 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) 3807 { 3808 struct vcpu_vmx *vmx = to_vmx(vcpu); 3809 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; 3810 3811 if (!cpu_has_vmx_msr_bitmap()) 3812 return; 3813 3814 vmx_msr_bitmap_l01_changed(vmx); 3815 3816 /* 3817 * Mark the desired intercept state in shadow bitmap, this is needed 3818 * for resync when the MSR filters change. 
3819 */ 3820 if (is_valid_passthrough_msr(msr)) { 3821 int idx = possible_passthrough_msr_slot(msr); 3822 3823 if (idx != -ENOENT) { 3824 if (type & MSR_TYPE_R) 3825 clear_bit(idx, vmx->shadow_msr_intercept.read); 3826 if (type & MSR_TYPE_W) 3827 clear_bit(idx, vmx->shadow_msr_intercept.write); 3828 } 3829 } 3830 3831 if ((type & MSR_TYPE_R) && 3832 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) { 3833 vmx_set_msr_bitmap_read(msr_bitmap, msr); 3834 type &= ~MSR_TYPE_R; 3835 } 3836 3837 if ((type & MSR_TYPE_W) && 3838 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) { 3839 vmx_set_msr_bitmap_write(msr_bitmap, msr); 3840 type &= ~MSR_TYPE_W; 3841 } 3842 3843 if (type & MSR_TYPE_R) 3844 vmx_clear_msr_bitmap_read(msr_bitmap, msr); 3845 3846 if (type & MSR_TYPE_W) 3847 vmx_clear_msr_bitmap_write(msr_bitmap, msr); 3848 } 3849 3850 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) 3851 { 3852 struct vcpu_vmx *vmx = to_vmx(vcpu); 3853 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; 3854 3855 if (!cpu_has_vmx_msr_bitmap()) 3856 return; 3857 3858 vmx_msr_bitmap_l01_changed(vmx); 3859 3860 /* 3861 * Mark the desired intercept state in shadow bitmap, this is needed 3862 * for resync when the MSR filter changes. 3863 */ 3864 if (is_valid_passthrough_msr(msr)) { 3865 int idx = possible_passthrough_msr_slot(msr); 3866 3867 if (idx != -ENOENT) { 3868 if (type & MSR_TYPE_R) 3869 set_bit(idx, vmx->shadow_msr_intercept.read); 3870 if (type & MSR_TYPE_W) 3871 set_bit(idx, vmx->shadow_msr_intercept.write); 3872 } 3873 } 3874 3875 if (type & MSR_TYPE_R) 3876 vmx_set_msr_bitmap_read(msr_bitmap, msr); 3877 3878 if (type & MSR_TYPE_W) 3879 vmx_set_msr_bitmap_write(msr_bitmap, msr); 3880 } 3881 3882 static void vmx_reset_x2apic_msrs(struct kvm_vcpu *vcpu, u8 mode) 3883 { 3884 unsigned long *msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; 3885 unsigned long read_intercept; 3886 int msr; 3887 3888 read_intercept = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0; 3889 3890 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 3891 unsigned int read_idx = msr / BITS_PER_LONG; 3892 unsigned int write_idx = read_idx + (0x800 / sizeof(long)); 3893 3894 msr_bitmap[read_idx] = read_intercept; 3895 msr_bitmap[write_idx] = ~0ul; 3896 } 3897 } 3898 3899 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu) 3900 { 3901 struct vcpu_vmx *vmx = to_vmx(vcpu); 3902 u8 mode; 3903 3904 if (!cpu_has_vmx_msr_bitmap()) 3905 return; 3906 3907 if (cpu_has_secondary_exec_ctrls() && 3908 (secondary_exec_controls_get(vmx) & 3909 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { 3910 mode = MSR_BITMAP_MODE_X2APIC; 3911 if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) 3912 mode |= MSR_BITMAP_MODE_X2APIC_APICV; 3913 } else { 3914 mode = 0; 3915 } 3916 3917 if (mode == vmx->x2apic_msr_bitmap_mode) 3918 return; 3919 3920 vmx->x2apic_msr_bitmap_mode = mode; 3921 3922 vmx_reset_x2apic_msrs(vcpu, mode); 3923 3924 /* 3925 * TPR reads and writes can be virtualized even if virtual interrupt 3926 * delivery is not in use. 
3927 */ 3928 vmx_set_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW, 3929 !(mode & MSR_BITMAP_MODE_X2APIC)); 3930 3931 if (mode & MSR_BITMAP_MODE_X2APIC_APICV) { 3932 vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW); 3933 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W); 3934 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W); 3935 } 3936 } 3937 3938 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu) 3939 { 3940 struct vcpu_vmx *vmx = to_vmx(vcpu); 3941 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); 3942 u32 i; 3943 3944 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag); 3945 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag); 3946 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag); 3947 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag); 3948 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) { 3949 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag); 3950 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag); 3951 } 3952 } 3953 3954 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) 3955 { 3956 struct vcpu_vmx *vmx = to_vmx(vcpu); 3957 void *vapic_page; 3958 u32 vppr; 3959 int rvi; 3960 3961 if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || 3962 !nested_cpu_has_vid(get_vmcs12(vcpu)) || 3963 WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn)) 3964 return false; 3965 3966 rvi = vmx_get_rvi(); 3967 3968 vapic_page = vmx->nested.virtual_apic_map.hva; 3969 vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); 3970 3971 return ((rvi & 0xf0) > (vppr & 0xf0)); 3972 } 3973 3974 static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu) 3975 { 3976 struct vcpu_vmx *vmx = to_vmx(vcpu); 3977 u32 i; 3978 3979 /* 3980 * Set intercept permissions for all potentially passed through MSRs 3981 * again. They will automatically get filtered through the MSR filter, 3982 * so we are back in sync after this. 3983 */ 3984 for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) { 3985 u32 msr = vmx_possible_passthrough_msrs[i]; 3986 bool read = test_bit(i, vmx->shadow_msr_intercept.read); 3987 bool write = test_bit(i, vmx->shadow_msr_intercept.write); 3988 3989 vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_R, read); 3990 vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_W, write); 3991 } 3992 3993 pt_update_intercept_for_msr(vcpu); 3994 } 3995 3996 static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, 3997 int pi_vec) 3998 { 3999 #ifdef CONFIG_SMP 4000 if (vcpu->mode == IN_GUEST_MODE) { 4001 /* 4002 * The vector of the virtual has already been set in the PIR. 4003 * Send a notification event to deliver the virtual interrupt 4004 * unless the vCPU is the currently running vCPU, i.e. the 4005 * event is being sent from a fastpath VM-Exit handler, in 4006 * which case the PIR will be synced to the vIRR before 4007 * re-entering the guest. 4008 * 4009 * When the target is not the running vCPU, the following 4010 * possibilities emerge: 4011 * 4012 * Case 1: vCPU stays in non-root mode. Sending a notification 4013 * event posts the interrupt to the vCPU. 4014 * 4015 * Case 2: vCPU exits to root mode and is still runnable. The 4016 * PIR will be synced to the vIRR before re-entering the guest. 4017 * Sending a notification event is ok as the host IRQ handler 4018 * will ignore the spurious event. 
4019 * 4020 * Case 3: vCPU exits to root mode and is blocked. vcpu_block() 4021 * has already synced PIR to vIRR and never blocks the vCPU if 4022 * the vIRR is not empty. Therefore, a blocked vCPU here does 4023 * not wait for any requested interrupts in PIR, and sending a 4024 * notification event also results in a benign, spurious event. 4025 */ 4026 4027 if (vcpu != kvm_get_running_vcpu()) 4028 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); 4029 return; 4030 } 4031 #endif 4032 /* 4033 * The vCPU isn't in the guest; wake the vCPU in case it is blocking, 4034 * otherwise do nothing as KVM will grab the highest priority pending 4035 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest(). 4036 */ 4037 kvm_vcpu_wake_up(vcpu); 4038 } 4039 4040 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, 4041 int vector) 4042 { 4043 struct vcpu_vmx *vmx = to_vmx(vcpu); 4044 4045 if (is_guest_mode(vcpu) && 4046 vector == vmx->nested.posted_intr_nv) { 4047 /* 4048 * If the posted interrupt is not recognized by hardware, 4049 * it will be delivered on the next VM-entry. 4050 */ 4051 vmx->nested.pi_pending = true; 4052 kvm_make_request(KVM_REQ_EVENT, vcpu); 4053 4054 /* 4055 * This pairs with the smp_mb_*() after setting vcpu->mode in 4056 * vcpu_enter_guest() to guarantee the vCPU sees the event 4057 * request if triggering a posted interrupt "fails" because 4058 * vcpu->mode != IN_GUEST_MODE. The extra barrier is needed as 4059 * the smp_wmb() in kvm_make_request() only ensures everything 4060 * done before making the request is visible when the request 4061 * is visible; it doesn't ensure ordering between the store to 4062 * vcpu->requests and the load from vcpu->mode. 4063 */ 4064 smp_mb__after_atomic(); 4065 4066 /* The PIR and ON have been set by L1. */ 4067 kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR); 4068 return 0; 4069 } 4070 return -1; 4071 } 4072 /* 4073 * Send an interrupt to the vcpu via posted interrupt: 4074 * 1. If the target vcpu is running (non-root mode), send a posted interrupt 4075 * notification and hardware will sync PIR to vIRR atomically. 4076 * 2. If the target vcpu isn't running (root mode), kick it to pick up the 4077 * interrupt from PIR on the next VM-entry. 4078 */ 4079 static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) 4080 { 4081 struct vcpu_vmx *vmx = to_vmx(vcpu); 4082 int r; 4083 4084 r = vmx_deliver_nested_posted_interrupt(vcpu, vector); 4085 if (!r) 4086 return 0; 4087 4088 if (!vcpu->arch.apicv_active) 4089 return -1; 4090 4091 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) 4092 return 0; 4093 4094 /* If a previous notification has sent the IPI, nothing to do. */ 4095 if (pi_test_and_set_on(&vmx->pi_desc)) 4096 return 0; 4097 4098 /* 4099 * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*() 4100 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is 4101 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a 4102 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
4103 */ 4104 kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR); 4105 return 0; 4106 } 4107 4108 static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode, 4109 int trig_mode, int vector) 4110 { 4111 struct kvm_vcpu *vcpu = apic->vcpu; 4112 4113 if (vmx_deliver_posted_interrupt(vcpu, vector)) { 4114 kvm_lapic_set_irr(vector, apic); 4115 kvm_make_request(KVM_REQ_EVENT, vcpu); 4116 kvm_vcpu_kick(vcpu); 4117 } else { 4118 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, 4119 trig_mode, vector); 4120 } 4121 } 4122 4123 /* 4124 * Set up the vmcs's constant host-state fields, i.e., host-state fields that 4125 * will not change in the lifetime of the guest. 4126 * Note that host-state that does change is set elsewhere. E.g., host-state 4127 * that is set differently for each CPU is set in vmx_vcpu_load(), not here. 4128 */ 4129 void vmx_set_constant_host_state(struct vcpu_vmx *vmx) 4130 { 4131 u32 low32, high32; 4132 unsigned long tmpl; 4133 unsigned long cr0, cr3, cr4; 4134 4135 cr0 = read_cr0(); 4136 WARN_ON(cr0 & X86_CR0_TS); 4137 vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */ 4138 4139 /* 4140 * Save the most likely value for this task's CR3 in the VMCS. 4141 * We can't use __get_current_cr3_fast() because we're not atomic. 4142 */ 4143 cr3 = __read_cr3(); 4144 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ 4145 vmx->loaded_vmcs->host_state.cr3 = cr3; 4146 4147 /* Save the most likely value for this task's CR4 in the VMCS. */ 4148 cr4 = cr4_read_shadow(); 4149 vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ 4150 vmx->loaded_vmcs->host_state.cr4 = cr4; 4151 4152 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ 4153 #ifdef CONFIG_X86_64 4154 /* 4155 * Load null selectors, so we can avoid reloading them in 4156 * vmx_prepare_switch_to_host(), in case userspace uses 4157 * the null selectors too (the expected case). 4158 */ 4159 vmcs_write16(HOST_DS_SELECTOR, 0); 4160 vmcs_write16(HOST_ES_SELECTOR, 0); 4161 #else 4162 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4163 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4164 #endif 4165 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4166 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ 4167 4168 vmcs_writel(HOST_IDTR_BASE, host_idt_base); /* 22.2.4 */ 4169 4170 vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */ 4171 4172 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); 4173 vmcs_write32(HOST_IA32_SYSENTER_CS, low32); 4174 4175 /* 4176 * SYSENTER is used for 32-bit system calls on either 32-bit or 4177 * 64-bit kernels. It is always zero If neither is allowed, otherwise 4178 * vmx_vcpu_load_vmcs loads it with the per-CPU entry stack (and may 4179 * have already done so!). 
4180 */ 4181 if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32)) 4182 vmcs_writel(HOST_IA32_SYSENTER_ESP, 0); 4183 4184 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); 4185 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ 4186 4187 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { 4188 rdmsr(MSR_IA32_CR_PAT, low32, high32); 4189 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); 4190 } 4191 4192 if (cpu_has_load_ia32_efer()) 4193 vmcs_write64(HOST_IA32_EFER, host_efer); 4194 } 4195 4196 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) 4197 { 4198 struct kvm_vcpu *vcpu = &vmx->vcpu; 4199 4200 vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS & 4201 ~vcpu->arch.cr4_guest_rsvd_bits; 4202 if (!enable_ept) { 4203 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS; 4204 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS; 4205 } 4206 if (is_guest_mode(&vmx->vcpu)) 4207 vcpu->arch.cr4_guest_owned_bits &= 4208 ~get_vmcs12(vcpu)->cr4_guest_host_mask; 4209 vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits); 4210 } 4211 4212 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) 4213 { 4214 u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; 4215 4216 if (!kvm_vcpu_apicv_active(&vmx->vcpu)) 4217 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; 4218 4219 if (!enable_vnmi) 4220 pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS; 4221 4222 if (!enable_preemption_timer) 4223 pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 4224 4225 return pin_based_exec_ctrl; 4226 } 4227 4228 static u32 vmx_vmentry_ctrl(void) 4229 { 4230 u32 vmentry_ctrl = vmcs_config.vmentry_ctrl; 4231 4232 if (vmx_pt_mode_is_system()) 4233 vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP | 4234 VM_ENTRY_LOAD_IA32_RTIT_CTL); 4235 /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */ 4236 return vmentry_ctrl & 4237 ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER); 4238 } 4239 4240 static u32 vmx_vmexit_ctrl(void) 4241 { 4242 u32 vmexit_ctrl = vmcs_config.vmexit_ctrl; 4243 4244 if (vmx_pt_mode_is_system()) 4245 vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP | 4246 VM_EXIT_CLEAR_IA32_RTIT_CTL); 4247 /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */ 4248 return vmexit_ctrl & 4249 ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER); 4250 } 4251 4252 static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) 4253 { 4254 struct vcpu_vmx *vmx = to_vmx(vcpu); 4255 4256 if (is_guest_mode(vcpu)) { 4257 vmx->nested.update_vmcs01_apicv_status = true; 4258 return; 4259 } 4260 4261 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); 4262 if (cpu_has_secondary_exec_ctrls()) { 4263 if (kvm_vcpu_apicv_active(vcpu)) 4264 secondary_exec_controls_setbit(vmx, 4265 SECONDARY_EXEC_APIC_REGISTER_VIRT | 4266 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4267 else 4268 secondary_exec_controls_clearbit(vmx, 4269 SECONDARY_EXEC_APIC_REGISTER_VIRT | 4270 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4271 } 4272 4273 vmx_update_msr_bitmap_x2apic(vcpu); 4274 } 4275 4276 static u32 vmx_exec_control(struct vcpu_vmx *vmx) 4277 { 4278 u32 exec_control = vmcs_config.cpu_based_exec_ctrl; 4279 4280 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) 4281 exec_control &= ~CPU_BASED_MOV_DR_EXITING; 4282 4283 if (!cpu_need_tpr_shadow(&vmx->vcpu)) { 4284 exec_control &= ~CPU_BASED_TPR_SHADOW; 4285 #ifdef CONFIG_X86_64 4286 exec_control |= CPU_BASED_CR8_STORE_EXITING | 4287 CPU_BASED_CR8_LOAD_EXITING; 4288 #endif 4289 } 4290 if (!enable_ept) 4291 exec_control |= 
CPU_BASED_CR3_STORE_EXITING | 4292 CPU_BASED_CR3_LOAD_EXITING | 4293 CPU_BASED_INVLPG_EXITING; 4294 if (kvm_mwait_in_guest(vmx->vcpu.kvm)) 4295 exec_control &= ~(CPU_BASED_MWAIT_EXITING | 4296 CPU_BASED_MONITOR_EXITING); 4297 if (kvm_hlt_in_guest(vmx->vcpu.kvm)) 4298 exec_control &= ~CPU_BASED_HLT_EXITING; 4299 return exec_control; 4300 } 4301 4302 /* 4303 * Adjust a single secondary execution control bit to intercept/allow an 4304 * instruction in the guest. This is usually done based on whether or not a 4305 * feature has been exposed to the guest in order to correctly emulate faults. 4306 */ 4307 static inline void 4308 vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control, 4309 u32 control, bool enabled, bool exiting) 4310 { 4311 /* 4312 * If the control is for an opt-in feature, clear the control if the 4313 * feature is not exposed to the guest, i.e. not enabled. If the 4314 * control is opt-out, i.e. an exiting control, clear the control if 4315 * the feature _is_ exposed to the guest, i.e. exiting/interception is 4316 * disabled for the associated instruction. Note, the caller is 4317 * responsible for presetting exec_control to set all supported bits. 4318 */ 4319 if (enabled == exiting) 4320 *exec_control &= ~control; 4321 4322 /* 4323 * Update the nested MSR settings so that a nested VMM can/can't set 4324 * controls for features that are/aren't exposed to the guest. 4325 */ 4326 if (nested) { 4327 if (enabled) 4328 vmx->nested.msrs.secondary_ctls_high |= control; 4329 else 4330 vmx->nested.msrs.secondary_ctls_high &= ~control; 4331 } 4332 } 4333 4334 /* 4335 * Wrapper macro for the common case of adjusting a secondary execution control 4336 * based on a single guest CPUID bit, with a dedicated feature bit. This also 4337 * verifies that the control is actually supported by KVM and hardware. 4338 */ 4339 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \ 4340 ({ \ 4341 bool __enabled; \ 4342 \ 4343 if (cpu_has_vmx_##name()) { \ 4344 __enabled = guest_cpuid_has(&(vmx)->vcpu, \ 4345 X86_FEATURE_##feat_name); \ 4346 vmx_adjust_secondary_exec_control(vmx, exec_control, \ 4347 SECONDARY_EXEC_##ctrl_name, __enabled, exiting); \ 4348 } \ 4349 }) 4350 4351 /* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls.
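 * For example, vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID)
 * below toggles SECONDARY_EXEC_ENABLE_INVPCID based on X86_FEATURE_INVPCID,
 * while vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND)
 * toggles SECONDARY_EXEC_RDRAND_EXITING.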
*/ 4352 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \ 4353 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false) 4354 4355 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \ 4356 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true) 4357 4358 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) 4359 { 4360 struct kvm_vcpu *vcpu = &vmx->vcpu; 4361 4362 u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; 4363 4364 if (vmx_pt_mode_is_system()) 4365 exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX); 4366 if (!cpu_need_virtualize_apic_accesses(vcpu)) 4367 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 4368 if (vmx->vpid == 0) 4369 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; 4370 if (!enable_ept) { 4371 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; 4372 enable_unrestricted_guest = 0; 4373 } 4374 if (!enable_unrestricted_guest) 4375 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 4376 if (kvm_pause_in_guest(vmx->vcpu.kvm)) 4377 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; 4378 if (!kvm_vcpu_apicv_active(vcpu)) 4379 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | 4380 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4381 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 4382 4383 /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP, 4384 * in vmx_set_cr4. */ 4385 exec_control &= ~SECONDARY_EXEC_DESC; 4386 4387 /* 4388 * SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD 4389 * (handle_vmptrld). We can NOT enable shadow_vmcs here because we 4390 * don't yet have a current VMCS12. 4391 */ 4392 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 4393 4394 /* 4395 * PML is enabled/disabled when dirty logging of memslots changes, but 4396 * it needs to be set here when dirty logging is already active, e.g. 4397 * if this vCPU was created after dirty logging was enabled. 4398 */ 4399 if (!vcpu->kvm->arch.cpu_dirty_logging_count) 4400 exec_control &= ~SECONDARY_EXEC_ENABLE_PML; 4401 4402 if (cpu_has_vmx_xsaves()) { 4403 /* Expose XSAVES only when XSAVE is exposed. */ 4404 bool xsaves_enabled = 4405 boot_cpu_has(X86_FEATURE_XSAVE) && 4406 guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && 4407 guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); 4408 4409 vcpu->arch.xsaves_enabled = xsaves_enabled; 4410 4411 vmx_adjust_secondary_exec_control(vmx, &exec_control, 4412 SECONDARY_EXEC_XSAVES, 4413 xsaves_enabled, false); 4414 } 4415 4416 /* 4417 * RDPID is also gated by ENABLE_RDTSCP; turn on the control if either 4418 * feature is exposed to the guest. This creates a virtualization hole 4419 * if both are supported in hardware but only one is exposed to the 4420 * guest, but letting the guest execute RDTSCP or RDPID when either one 4421 * is advertised is preferable to emulating the advertised instruction 4422 * in KVM on #UD, and obviously better than incorrectly injecting #UD.
4423 */ 4424 if (cpu_has_vmx_rdtscp()) { 4425 bool rdpid_or_rdtscp_enabled = 4426 guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) || 4427 guest_cpuid_has(vcpu, X86_FEATURE_RDPID); 4428 4429 vmx_adjust_secondary_exec_control(vmx, &exec_control, 4430 SECONDARY_EXEC_ENABLE_RDTSCP, 4431 rdpid_or_rdtscp_enabled, false); 4432 } 4433 vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID); 4434 4435 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND); 4436 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED); 4437 4438 vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG, 4439 ENABLE_USR_WAIT_PAUSE, false); 4440 4441 if (!vcpu->kvm->arch.bus_lock_detection_enabled) 4442 exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION; 4443 4444 return exec_control; 4445 } 4446 4447 #define VMX_XSS_EXIT_BITMAP 0 4448 4449 static void init_vmcs(struct vcpu_vmx *vmx) 4450 { 4451 if (nested) 4452 nested_vmx_set_vmcs_shadowing_bitmap(); 4453 4454 if (cpu_has_vmx_msr_bitmap()) 4455 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); 4456 4457 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); /* 22.3.1.5 */ 4458 4459 /* Control */ 4460 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); 4461 4462 exec_controls_set(vmx, vmx_exec_control(vmx)); 4463 4464 if (cpu_has_secondary_exec_ctrls()) 4465 secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx)); 4466 4467 if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) { 4468 vmcs_write64(EOI_EXIT_BITMAP0, 0); 4469 vmcs_write64(EOI_EXIT_BITMAP1, 0); 4470 vmcs_write64(EOI_EXIT_BITMAP2, 0); 4471 vmcs_write64(EOI_EXIT_BITMAP3, 0); 4472 4473 vmcs_write16(GUEST_INTR_STATUS, 0); 4474 4475 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); 4476 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); 4477 } 4478 4479 if (!kvm_pause_in_guest(vmx->vcpu.kvm)) { 4480 vmcs_write32(PLE_GAP, ple_gap); 4481 vmx->ple_window = ple_window; 4482 vmx->ple_window_dirty = true; 4483 } 4484 4485 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 4486 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 4487 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ 4488 4489 vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ 4490 vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ 4491 vmx_set_constant_host_state(vmx); 4492 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ 4493 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ 4494 4495 if (cpu_has_vmx_vmfunc()) 4496 vmcs_write64(VM_FUNCTION_CONTROL, 0); 4497 4498 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); 4499 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 4500 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 4501 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 4502 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 4503 4504 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) 4505 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 4506 4507 vm_exit_controls_set(vmx, vmx_vmexit_ctrl()); 4508 4509 /* 22.2.1, 20.8.1 */ 4510 vm_entry_controls_set(vmx, vmx_vmentry_ctrl()); 4511 4512 vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4513 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits); 4514 4515 set_cr4_guest_host_mask(vmx); 4516 4517 if (vmx->vpid != 0) 4518 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 4519 4520 if (cpu_has_vmx_xsaves()) 4521 vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); 4522 4523 if (enable_pml) { 4524 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); 4525 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 4526 } 4527 4528 
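/* Set up the ENCLS-exiting bitmap to control interception of SGX ENCLS leaf functions. */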
vmx_write_encls_bitmap(&vmx->vcpu, NULL); 4529 4530 if (vmx_pt_mode_is_host_guest()) { 4531 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); 4532 /* Bit[6~0] are forced to 1, writes are ignored. */ 4533 vmx->pt_desc.guest.output_mask = 0x7F; 4534 vmcs_write64(GUEST_IA32_RTIT_CTL, 0); 4535 } 4536 4537 vmcs_write32(GUEST_SYSENTER_CS, 0); 4538 vmcs_writel(GUEST_SYSENTER_ESP, 0); 4539 vmcs_writel(GUEST_SYSENTER_EIP, 0); 4540 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4541 4542 if (cpu_has_vmx_tpr_shadow()) { 4543 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); 4544 if (cpu_need_tpr_shadow(&vmx->vcpu)) 4545 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 4546 __pa(vmx->vcpu.arch.apic->regs)); 4547 vmcs_write32(TPR_THRESHOLD, 0); 4548 } 4549 4550 vmx_setup_uret_msrs(vmx); 4551 } 4552 4553 static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu) 4554 { 4555 struct vcpu_vmx *vmx = to_vmx(vcpu); 4556 4557 init_vmcs(vmx); 4558 4559 if (nested) 4560 memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs)); 4561 4562 vcpu_setup_sgx_lepubkeyhash(vcpu); 4563 4564 vmx->nested.posted_intr_nv = -1; 4565 vmx->nested.vmxon_ptr = INVALID_GPA; 4566 vmx->nested.current_vmptr = INVALID_GPA; 4567 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; 4568 4569 vcpu->arch.microcode_version = 0x100000000ULL; 4570 vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED; 4571 4572 /* 4573 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR 4574 * or POSTED_INTR_WAKEUP_VECTOR. 4575 */ 4576 vmx->pi_desc.nv = POSTED_INTR_VECTOR; 4577 vmx->pi_desc.sn = 1; 4578 } 4579 4580 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 4581 { 4582 struct vcpu_vmx *vmx = to_vmx(vcpu); 4583 4584 if (!init_event) 4585 __vmx_vcpu_reset(vcpu); 4586 4587 vmx->rmode.vm86_active = 0; 4588 vmx->spec_ctrl = 0; 4589 4590 vmx->msr_ia32_umwait_control = 0; 4591 4592 vmx->hv_deadline_tsc = -1; 4593 kvm_set_cr8(vcpu, 0); 4594 4595 vmx_segment_cache_clear(vmx); 4596 kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS); 4597 4598 seg_setup(VCPU_SREG_CS); 4599 vmcs_write16(GUEST_CS_SELECTOR, 0xf000); 4600 vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); 4601 4602 seg_setup(VCPU_SREG_DS); 4603 seg_setup(VCPU_SREG_ES); 4604 seg_setup(VCPU_SREG_FS); 4605 seg_setup(VCPU_SREG_GS); 4606 seg_setup(VCPU_SREG_SS); 4607 4608 vmcs_write16(GUEST_TR_SELECTOR, 0); 4609 vmcs_writel(GUEST_TR_BASE, 0); 4610 vmcs_write32(GUEST_TR_LIMIT, 0xffff); 4611 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 4612 4613 vmcs_write16(GUEST_LDTR_SELECTOR, 0); 4614 vmcs_writel(GUEST_LDTR_BASE, 0); 4615 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); 4616 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); 4617 4618 vmcs_writel(GUEST_GDTR_BASE, 0); 4619 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); 4620 4621 vmcs_writel(GUEST_IDTR_BASE, 0); 4622 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); 4623 4624 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); 4625 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); 4626 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); 4627 if (kvm_mpx_supported()) 4628 vmcs_write64(GUEST_BNDCFGS, 0); 4629 4630 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ 4631 4632 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 4633 4634 vpid_sync_context(vmx->vpid); 4635 4636 vmx_update_fb_clear_dis(vcpu, vmx); 4637 } 4638 4639 static void vmx_enable_irq_window(struct kvm_vcpu *vcpu) 4640 { 4641 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); 4642 } 4643 4644 static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu) 4645 { 4646 if (!enable_vnmi || 4647 
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { 4648 vmx_enable_irq_window(vcpu); 4649 return; 4650 } 4651 4652 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); 4653 } 4654 4655 static void vmx_inject_irq(struct kvm_vcpu *vcpu) 4656 { 4657 struct vcpu_vmx *vmx = to_vmx(vcpu); 4658 uint32_t intr; 4659 int irq = vcpu->arch.interrupt.nr; 4660 4661 trace_kvm_inj_virq(irq); 4662 4663 ++vcpu->stat.irq_injections; 4664 if (vmx->rmode.vm86_active) { 4665 int inc_eip = 0; 4666 if (vcpu->arch.interrupt.soft) 4667 inc_eip = vcpu->arch.event_exit_inst_len; 4668 kvm_inject_realmode_interrupt(vcpu, irq, inc_eip); 4669 return; 4670 } 4671 intr = irq | INTR_INFO_VALID_MASK; 4672 if (vcpu->arch.interrupt.soft) { 4673 intr |= INTR_TYPE_SOFT_INTR; 4674 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 4675 vmx->vcpu.arch.event_exit_inst_len); 4676 } else 4677 intr |= INTR_TYPE_EXT_INTR; 4678 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); 4679 4680 vmx_clear_hlt(vcpu); 4681 } 4682 4683 static void vmx_inject_nmi(struct kvm_vcpu *vcpu) 4684 { 4685 struct vcpu_vmx *vmx = to_vmx(vcpu); 4686 4687 if (!enable_vnmi) { 4688 /* 4689 * Tracking the NMI-blocked state in software is built upon 4690 * finding the next open IRQ window. This, in turn, depends on 4691 * well-behaving guests: They have to keep IRQs disabled at 4692 * least as long as the NMI handler runs. Otherwise we may 4693 * cause NMI nesting, maybe breaking the guest. But as this is 4694 * highly unlikely, we can live with the residual risk. 4695 */ 4696 vmx->loaded_vmcs->soft_vnmi_blocked = 1; 4697 vmx->loaded_vmcs->vnmi_blocked_time = 0; 4698 } 4699 4700 ++vcpu->stat.nmi_injections; 4701 vmx->loaded_vmcs->nmi_known_unmasked = false; 4702 4703 if (vmx->rmode.vm86_active) { 4704 kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0); 4705 return; 4706 } 4707 4708 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 4709 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); 4710 4711 vmx_clear_hlt(vcpu); 4712 } 4713 4714 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) 4715 { 4716 struct vcpu_vmx *vmx = to_vmx(vcpu); 4717 bool masked; 4718 4719 if (!enable_vnmi) 4720 return vmx->loaded_vmcs->soft_vnmi_blocked; 4721 if (vmx->loaded_vmcs->nmi_known_unmasked) 4722 return false; 4723 masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; 4724 vmx->loaded_vmcs->nmi_known_unmasked = !masked; 4725 return masked; 4726 } 4727 4728 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) 4729 { 4730 struct vcpu_vmx *vmx = to_vmx(vcpu); 4731 4732 if (!enable_vnmi) { 4733 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { 4734 vmx->loaded_vmcs->soft_vnmi_blocked = masked; 4735 vmx->loaded_vmcs->vnmi_blocked_time = 0; 4736 } 4737 } else { 4738 vmx->loaded_vmcs->nmi_known_unmasked = !masked; 4739 if (masked) 4740 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 4741 GUEST_INTR_STATE_NMI); 4742 else 4743 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, 4744 GUEST_INTR_STATE_NMI); 4745 } 4746 } 4747 4748 bool vmx_nmi_blocked(struct kvm_vcpu *vcpu) 4749 { 4750 if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu)) 4751 return false; 4752 4753 if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) 4754 return true; 4755 4756 return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 4757 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI | 4758 GUEST_INTR_STATE_NMI)); 4759 } 4760 4761 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) 4762 { 4763 if (to_vmx(vcpu)->nested.nested_run_pending) 4764 return -EBUSY; 4765 4766 /* An NMI must not 
be injected into L2 if it's supposed to VM-Exit. */ 4767 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu)) 4768 return -EBUSY; 4769 4770 return !vmx_nmi_blocked(vcpu); 4771 } 4772 4773 bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu) 4774 { 4775 if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) 4776 return false; 4777 4778 return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) || 4779 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 4780 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); 4781 } 4782 4783 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) 4784 { 4785 if (to_vmx(vcpu)->nested.nested_run_pending) 4786 return -EBUSY; 4787 4788 /* 4789 * An IRQ must not be injected into L2 if it's supposed to VM-Exit, 4790 * e.g. if the IRQ arrived asynchronously after checking nested events. 4791 */ 4792 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) 4793 return -EBUSY; 4794 4795 return !vmx_interrupt_blocked(vcpu); 4796 } 4797 4798 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) 4799 { 4800 void __user *ret; 4801 4802 if (enable_unrestricted_guest) 4803 return 0; 4804 4805 mutex_lock(&kvm->slots_lock); 4806 ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, 4807 PAGE_SIZE * 3); 4808 mutex_unlock(&kvm->slots_lock); 4809 4810 if (IS_ERR(ret)) 4811 return PTR_ERR(ret); 4812 4813 to_kvm_vmx(kvm)->tss_addr = addr; 4814 4815 return init_rmode_tss(kvm, ret); 4816 } 4817 4818 static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) 4819 { 4820 to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr; 4821 return 0; 4822 } 4823 4824 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) 4825 { 4826 switch (vec) { 4827 case BP_VECTOR: 4828 /* 4829 * Update instruction length as we may reinject the exception 4830 * from user space while in guest debugging mode. 4831 */ 4832 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = 4833 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4834 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 4835 return false; 4836 fallthrough; 4837 case DB_VECTOR: 4838 return !(vcpu->guest_debug & 4839 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)); 4840 case DE_VECTOR: 4841 case OF_VECTOR: 4842 case BR_VECTOR: 4843 case UD_VECTOR: 4844 case DF_VECTOR: 4845 case SS_VECTOR: 4846 case GP_VECTOR: 4847 case MF_VECTOR: 4848 return true; 4849 } 4850 return false; 4851 } 4852 4853 static int handle_rmode_exception(struct kvm_vcpu *vcpu, 4854 int vec, u32 err_code) 4855 { 4856 /* 4857 * Instruction with address size override prefix opcode 0x67 4858 * Cause the #SS fault with 0 error code in VM86 mode. 4859 */ 4860 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { 4861 if (kvm_emulate_instruction(vcpu, 0)) { 4862 if (vcpu->arch.halt_request) { 4863 vcpu->arch.halt_request = 0; 4864 return kvm_emulate_halt_noskip(vcpu); 4865 } 4866 return 1; 4867 } 4868 return 0; 4869 } 4870 4871 /* 4872 * Forward all other exceptions that are valid in real mode. 4873 * FIXME: Breaks guest debugging in real mode, needs to be fixed with 4874 * the required debugging infrastructure rework. 4875 */ 4876 kvm_queue_exception(vcpu, vec); 4877 return 1; 4878 } 4879 4880 static int handle_machine_check(struct kvm_vcpu *vcpu) 4881 { 4882 /* handled by vmx_vcpu_run() */ 4883 return 1; 4884 } 4885 4886 /* 4887 * If the host has split lock detection disabled, then #AC is 4888 * unconditionally injected into the guest, which is the pre split lock 4889 * detection behaviour. 
4890 * 4891 * If the host has split lock detection enabled then #AC is 4892 * only injected into the guest when: 4893 * - Guest CPL == 3 (user mode) 4894 * - Guest has #AC detection enabled in CR0 4895 * - Guest EFLAGS has AC bit set 4896 */ 4897 bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu) 4898 { 4899 if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) 4900 return true; 4901 4902 return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) && 4903 (kvm_get_rflags(vcpu) & X86_EFLAGS_AC); 4904 } 4905 4906 static int handle_exception_nmi(struct kvm_vcpu *vcpu) 4907 { 4908 struct vcpu_vmx *vmx = to_vmx(vcpu); 4909 struct kvm_run *kvm_run = vcpu->run; 4910 u32 intr_info, ex_no, error_code; 4911 unsigned long cr2, dr6; 4912 u32 vect_info; 4913 4914 vect_info = vmx->idt_vectoring_info; 4915 intr_info = vmx_get_intr_info(vcpu); 4916 4917 if (is_machine_check(intr_info) || is_nmi(intr_info)) 4918 return 1; /* handled by handle_exception_nmi_irqoff() */ 4919 4920 /* 4921 * Queue the exception here instead of in handle_nm_fault_irqoff(). 4922 * This ensures the nested_vmx check is not skipped so vmexit can 4923 * be reflected to L1 (when it intercepts #NM) before reaching this 4924 * point. 4925 */ 4926 if (is_nm_fault(intr_info)) { 4927 kvm_queue_exception(vcpu, NM_VECTOR); 4928 return 1; 4929 } 4930 4931 if (is_invalid_opcode(intr_info)) 4932 return handle_ud(vcpu); 4933 4934 error_code = 0; 4935 if (intr_info & INTR_INFO_DELIVER_CODE_MASK) 4936 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 4937 4938 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { 4939 WARN_ON_ONCE(!enable_vmware_backdoor); 4940 4941 /* 4942 * VMware backdoor emulation on #GP interception only handles 4943 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero 4944 * error code on #GP. 4945 */ 4946 if (error_code) { 4947 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); 4948 return 1; 4949 } 4950 return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP); 4951 } 4952 4953 /* 4954 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing 4955 * MMIO, it is better to report an internal error. 4956 * See the comments in vmx_handle_exit. 4957 */ 4958 if ((vect_info & VECTORING_INFO_VALID_MASK) && 4959 !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { 4960 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 4961 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; 4962 vcpu->run->internal.ndata = 4; 4963 vcpu->run->internal.data[0] = vect_info; 4964 vcpu->run->internal.data[1] = intr_info; 4965 vcpu->run->internal.data[2] = error_code; 4966 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu; 4967 return 0; 4968 } 4969 4970 if (is_page_fault(intr_info)) { 4971 cr2 = vmx_get_exit_qual(vcpu); 4972 if (enable_ept && !vcpu->arch.apf.host_apf_flags) { 4973 /* 4974 * EPT will cause page fault only if we need to 4975 * detect illegal GPAs. 4976 */ 4977 WARN_ON_ONCE(!allow_smaller_maxphyaddr); 4978 kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code); 4979 return 1; 4980 } else 4981 return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0); 4982 } 4983 4984 ex_no = intr_info & INTR_INFO_VECTOR_MASK; 4985 4986 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) 4987 return handle_rmode_exception(vcpu, ex_no, error_code); 4988 4989 switch (ex_no) { 4990 case DB_VECTOR: 4991 dr6 = vmx_get_exit_qual(vcpu); 4992 if (!(vcpu->guest_debug & 4993 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { 4994 /* 4995 * If the #DB was due to ICEBP, a.k.a. 
INT1, skip the 4996 * instruction. ICEBP generates a trap-like #DB, but 4997 * despite its interception control being tied to #DB, 4998 * is an instruction intercept, i.e. the VM-Exit occurs 4999 * on the ICEBP itself. Note, skipping ICEBP also 5000 * clears STI and MOVSS blocking. 5001 * 5002 * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS 5003 * if single-step is enabled in RFLAGS and STI or MOVSS 5004 * blocking is active, as the CPU doesn't set the bit 5005 * on VM-Exit due to #DB interception. VM-Entry has a 5006 * consistency check that a single-step #DB is pending 5007 * in this scenario as the previous instruction cannot 5008 * have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV 5009 * don't modify RFLAGS), therefore the one instruction 5010 * delay when activating single-step breakpoints must 5011 * have already expired. Note, the CPU sets/clears BS 5012 * as appropriate for all other VM-Exits types. 5013 */ 5014 if (is_icebp(intr_info)) 5015 WARN_ON(!skip_emulated_instruction(vcpu)); 5016 else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) && 5017 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 5018 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS))) 5019 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 5020 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS); 5021 5022 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); 5023 return 1; 5024 } 5025 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; 5026 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); 5027 fallthrough; 5028 case BP_VECTOR: 5029 /* 5030 * Update instruction length as we may reinject #BP from 5031 * user space while in guest debugging mode. Reading it for 5032 * #DB as well causes no harm, it is not used in that case. 5033 */ 5034 vmx->vcpu.arch.event_exit_inst_len = 5035 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 5036 kvm_run->exit_reason = KVM_EXIT_DEBUG; 5037 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); 5038 kvm_run->debug.arch.exception = ex_no; 5039 break; 5040 case AC_VECTOR: 5041 if (vmx_guest_inject_ac(vcpu)) { 5042 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); 5043 return 1; 5044 } 5045 5046 /* 5047 * Handle split lock. Depending on detection mode this will 5048 * either warn and disable split lock detection for this 5049 * task or force SIGBUS on it. 
5050 */ 5051 if (handle_guest_split_lock(kvm_rip_read(vcpu))) 5052 return 1; 5053 fallthrough; 5054 default: 5055 kvm_run->exit_reason = KVM_EXIT_EXCEPTION; 5056 kvm_run->ex.exception = ex_no; 5057 kvm_run->ex.error_code = error_code; 5058 break; 5059 } 5060 return 0; 5061 } 5062 5063 static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu) 5064 { 5065 ++vcpu->stat.irq_exits; 5066 return 1; 5067 } 5068 5069 static int handle_triple_fault(struct kvm_vcpu *vcpu) 5070 { 5071 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 5072 vcpu->mmio_needed = 0; 5073 return 0; 5074 } 5075 5076 static int handle_io(struct kvm_vcpu *vcpu) 5077 { 5078 unsigned long exit_qualification; 5079 int size, in, string; 5080 unsigned port; 5081 5082 exit_qualification = vmx_get_exit_qual(vcpu); 5083 string = (exit_qualification & 16) != 0; 5084 5085 ++vcpu->stat.io_exits; 5086 5087 if (string) 5088 return kvm_emulate_instruction(vcpu, 0); 5089 5090 port = exit_qualification >> 16; 5091 size = (exit_qualification & 7) + 1; 5092 in = (exit_qualification & 8) != 0; 5093 5094 return kvm_fast_pio(vcpu, size, port, in); 5095 } 5096 5097 static void 5098 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) 5099 { 5100 /* 5101 * Patch in the VMCALL instruction: 5102 */ 5103 hypercall[0] = 0x0f; 5104 hypercall[1] = 0x01; 5105 hypercall[2] = 0xc1; 5106 } 5107 5108 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */ 5109 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) 5110 { 5111 if (is_guest_mode(vcpu)) { 5112 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5113 unsigned long orig_val = val; 5114 5115 /* 5116 * We get here when L2 changed cr0 in a way that did not change 5117 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), 5118 * but did change L0 shadowed bits. So we first calculate the 5119 * effective cr0 value that L1 would like to write into the 5120 * hardware. It consists of the L2-owned bits from the new 5121 * value combined with the L1-owned bits from L1's guest_cr0. 
5122 */ 5123 val = (val & ~vmcs12->cr0_guest_host_mask) | 5124 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); 5125 5126 if (!nested_guest_cr0_valid(vcpu, val)) 5127 return 1; 5128 5129 if (kvm_set_cr0(vcpu, val)) 5130 return 1; 5131 vmcs_writel(CR0_READ_SHADOW, orig_val); 5132 return 0; 5133 } else { 5134 if (to_vmx(vcpu)->nested.vmxon && 5135 !nested_host_cr0_valid(vcpu, val)) 5136 return 1; 5137 5138 return kvm_set_cr0(vcpu, val); 5139 } 5140 } 5141 5142 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) 5143 { 5144 if (is_guest_mode(vcpu)) { 5145 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5146 unsigned long orig_val = val; 5147 5148 /* analogously to handle_set_cr0 */ 5149 val = (val & ~vmcs12->cr4_guest_host_mask) | 5150 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); 5151 if (kvm_set_cr4(vcpu, val)) 5152 return 1; 5153 vmcs_writel(CR4_READ_SHADOW, orig_val); 5154 return 0; 5155 } else 5156 return kvm_set_cr4(vcpu, val); 5157 } 5158 5159 static int handle_desc(struct kvm_vcpu *vcpu) 5160 { 5161 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); 5162 return kvm_emulate_instruction(vcpu, 0); 5163 } 5164 5165 static int handle_cr(struct kvm_vcpu *vcpu) 5166 { 5167 unsigned long exit_qualification, val; 5168 int cr; 5169 int reg; 5170 int err; 5171 int ret; 5172 5173 exit_qualification = vmx_get_exit_qual(vcpu); 5174 cr = exit_qualification & 15; 5175 reg = (exit_qualification >> 8) & 15; 5176 switch ((exit_qualification >> 4) & 3) { 5177 case 0: /* mov to cr */ 5178 val = kvm_register_read(vcpu, reg); 5179 trace_kvm_cr_write(cr, val); 5180 switch (cr) { 5181 case 0: 5182 err = handle_set_cr0(vcpu, val); 5183 return kvm_complete_insn_gp(vcpu, err); 5184 case 3: 5185 WARN_ON_ONCE(enable_unrestricted_guest); 5186 5187 err = kvm_set_cr3(vcpu, val); 5188 return kvm_complete_insn_gp(vcpu, err); 5189 case 4: 5190 err = handle_set_cr4(vcpu, val); 5191 return kvm_complete_insn_gp(vcpu, err); 5192 case 8: { 5193 u8 cr8_prev = kvm_get_cr8(vcpu); 5194 u8 cr8 = (u8)val; 5195 err = kvm_set_cr8(vcpu, cr8); 5196 ret = kvm_complete_insn_gp(vcpu, err); 5197 if (lapic_in_kernel(vcpu)) 5198 return ret; 5199 if (cr8_prev <= cr8) 5200 return ret; 5201 /* 5202 * TODO: we might be squashing a 5203 * KVM_GUESTDBG_SINGLESTEP-triggered 5204 * KVM_EXIT_DEBUG here. 
5205 */ 5206 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; 5207 return 0; 5208 } 5209 } 5210 break; 5211 case 2: /* clts */ 5212 KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS"); 5213 return -EIO; 5214 case 1: /*mov from cr*/ 5215 switch (cr) { 5216 case 3: 5217 WARN_ON_ONCE(enable_unrestricted_guest); 5218 5219 val = kvm_read_cr3(vcpu); 5220 kvm_register_write(vcpu, reg, val); 5221 trace_kvm_cr_read(cr, val); 5222 return kvm_skip_emulated_instruction(vcpu); 5223 case 8: 5224 val = kvm_get_cr8(vcpu); 5225 kvm_register_write(vcpu, reg, val); 5226 trace_kvm_cr_read(cr, val); 5227 return kvm_skip_emulated_instruction(vcpu); 5228 } 5229 break; 5230 case 3: /* lmsw */ 5231 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 5232 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); 5233 kvm_lmsw(vcpu, val); 5234 5235 return kvm_skip_emulated_instruction(vcpu); 5236 default: 5237 break; 5238 } 5239 vcpu->run->exit_reason = 0; 5240 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", 5241 (int)(exit_qualification >> 4) & 3, cr); 5242 return 0; 5243 } 5244 5245 static int handle_dr(struct kvm_vcpu *vcpu) 5246 { 5247 unsigned long exit_qualification; 5248 int dr, dr7, reg; 5249 int err = 1; 5250 5251 exit_qualification = vmx_get_exit_qual(vcpu); 5252 dr = exit_qualification & DEBUG_REG_ACCESS_NUM; 5253 5254 /* First, if DR does not exist, trigger UD */ 5255 if (!kvm_require_dr(vcpu, dr)) 5256 return 1; 5257 5258 if (vmx_get_cpl(vcpu) > 0) 5259 goto out; 5260 5261 dr7 = vmcs_readl(GUEST_DR7); 5262 if (dr7 & DR7_GD) { 5263 /* 5264 * As the vm-exit takes precedence over the debug trap, we 5265 * need to emulate the latter, either for the host or the 5266 * guest debugging itself. 5267 */ 5268 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 5269 vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW; 5270 vcpu->run->debug.arch.dr7 = dr7; 5271 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); 5272 vcpu->run->debug.arch.exception = DB_VECTOR; 5273 vcpu->run->exit_reason = KVM_EXIT_DEBUG; 5274 return 0; 5275 } else { 5276 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD); 5277 return 1; 5278 } 5279 } 5280 5281 if (vcpu->guest_debug == 0) { 5282 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); 5283 5284 /* 5285 * No more DR vmexits; force a reload of the debug registers 5286 * and reenter on this instruction. The next vmexit will 5287 * retrieve the full state of the debug registers. 5288 */ 5289 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; 5290 return 1; 5291 } 5292 5293 reg = DEBUG_REG_ACCESS_REG(exit_qualification); 5294 if (exit_qualification & TYPE_MOV_FROM_DR) { 5295 unsigned long val; 5296 5297 kvm_get_dr(vcpu, dr, &val); 5298 kvm_register_write(vcpu, reg, val); 5299 err = 0; 5300 } else { 5301 err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg)); 5302 } 5303 5304 out: 5305 return kvm_complete_insn_gp(vcpu, err); 5306 } 5307 5308 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) 5309 { 5310 get_debugreg(vcpu->arch.db[0], 0); 5311 get_debugreg(vcpu->arch.db[1], 1); 5312 get_debugreg(vcpu->arch.db[2], 2); 5313 get_debugreg(vcpu->arch.db[3], 3); 5314 get_debugreg(vcpu->arch.dr6, 6); 5315 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); 5316 5317 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; 5318 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); 5319 5320 /* 5321 * exc_debug expects dr6 to be cleared after it runs, avoid that it sees 5322 * a stale dr6 from the guest. 
5323 */ 5324 set_debugreg(DR6_RESERVED, 6); 5325 } 5326 5327 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) 5328 { 5329 vmcs_writel(GUEST_DR7, val); 5330 } 5331 5332 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) 5333 { 5334 kvm_apic_update_ppr(vcpu); 5335 return 1; 5336 } 5337 5338 static int handle_interrupt_window(struct kvm_vcpu *vcpu) 5339 { 5340 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); 5341 5342 kvm_make_request(KVM_REQ_EVENT, vcpu); 5343 5344 ++vcpu->stat.irq_window_exits; 5345 return 1; 5346 } 5347 5348 static int handle_invlpg(struct kvm_vcpu *vcpu) 5349 { 5350 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5351 5352 kvm_mmu_invlpg(vcpu, exit_qualification); 5353 return kvm_skip_emulated_instruction(vcpu); 5354 } 5355 5356 static int handle_apic_access(struct kvm_vcpu *vcpu) 5357 { 5358 if (likely(fasteoi)) { 5359 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5360 int access_type, offset; 5361 5362 access_type = exit_qualification & APIC_ACCESS_TYPE; 5363 offset = exit_qualification & APIC_ACCESS_OFFSET; 5364 /* 5365 * Sane guest uses MOV to write EOI, with written value 5366 * not cared. So make a short-circuit here by avoiding 5367 * heavy instruction emulation. 5368 */ 5369 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && 5370 (offset == APIC_EOI)) { 5371 kvm_lapic_set_eoi(vcpu); 5372 return kvm_skip_emulated_instruction(vcpu); 5373 } 5374 } 5375 return kvm_emulate_instruction(vcpu, 0); 5376 } 5377 5378 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) 5379 { 5380 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5381 int vector = exit_qualification & 0xff; 5382 5383 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ 5384 kvm_apic_set_eoi_accelerated(vcpu, vector); 5385 return 1; 5386 } 5387 5388 static int handle_apic_write(struct kvm_vcpu *vcpu) 5389 { 5390 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5391 5392 /* 5393 * APIC-write VM-Exit is trap-like, KVM doesn't need to advance RIP and 5394 * hardware has done any necessary aliasing, offset adjustments, etc... 5395 * for the access. I.e. the correct value has already been written to 5396 * the vAPIC page for the correct 16-byte chunk. KVM needs only to 5397 * retrieve the register value and emulate the access. 
5398 */ 5399 u32 offset = exit_qualification & 0xff0; 5400 5401 kvm_apic_write_nodecode(vcpu, offset); 5402 return 1; 5403 } 5404 5405 static int handle_task_switch(struct kvm_vcpu *vcpu) 5406 { 5407 struct vcpu_vmx *vmx = to_vmx(vcpu); 5408 unsigned long exit_qualification; 5409 bool has_error_code = false; 5410 u32 error_code = 0; 5411 u16 tss_selector; 5412 int reason, type, idt_v, idt_index; 5413 5414 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); 5415 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); 5416 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); 5417 5418 exit_qualification = vmx_get_exit_qual(vcpu); 5419 5420 reason = (u32)exit_qualification >> 30; 5421 if (reason == TASK_SWITCH_GATE && idt_v) { 5422 switch (type) { 5423 case INTR_TYPE_NMI_INTR: 5424 vcpu->arch.nmi_injected = false; 5425 vmx_set_nmi_mask(vcpu, true); 5426 break; 5427 case INTR_TYPE_EXT_INTR: 5428 case INTR_TYPE_SOFT_INTR: 5429 kvm_clear_interrupt_queue(vcpu); 5430 break; 5431 case INTR_TYPE_HARD_EXCEPTION: 5432 if (vmx->idt_vectoring_info & 5433 VECTORING_INFO_DELIVER_CODE_MASK) { 5434 has_error_code = true; 5435 error_code = 5436 vmcs_read32(IDT_VECTORING_ERROR_CODE); 5437 } 5438 fallthrough; 5439 case INTR_TYPE_SOFT_EXCEPTION: 5440 kvm_clear_exception_queue(vcpu); 5441 break; 5442 default: 5443 break; 5444 } 5445 } 5446 tss_selector = exit_qualification; 5447 5448 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && 5449 type != INTR_TYPE_EXT_INTR && 5450 type != INTR_TYPE_NMI_INTR)) 5451 WARN_ON(!skip_emulated_instruction(vcpu)); 5452 5453 /* 5454 * TODO: What about debug traps on tss switch? 5455 * Are we supposed to inject them and update dr6? 5456 */ 5457 return kvm_task_switch(vcpu, tss_selector, 5458 type == INTR_TYPE_SOFT_INTR ? idt_index : -1, 5459 reason, has_error_code, error_code); 5460 } 5461 5462 static int handle_ept_violation(struct kvm_vcpu *vcpu) 5463 { 5464 unsigned long exit_qualification; 5465 gpa_t gpa; 5466 u64 error_code; 5467 5468 exit_qualification = vmx_get_exit_qual(vcpu); 5469 5470 /* 5471 * EPT violation happened while executing iret from NMI, 5472 * "blocked by NMI" bit has to be set before next VM entry. 5473 * There are errata that may cause this bit to not be set: 5474 * AAK134, BY25. 5475 */ 5476 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 5477 enable_vnmi && 5478 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 5479 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); 5480 5481 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5482 trace_kvm_page_fault(gpa, exit_qualification); 5483 5484 /* Is it a read fault? */ 5485 error_code = (exit_qualification & EPT_VIOLATION_ACC_READ) 5486 ? PFERR_USER_MASK : 0; 5487 /* Is it a write fault? */ 5488 error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE) 5489 ? PFERR_WRITE_MASK : 0; 5490 /* Is it a fetch fault? */ 5491 error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR) 5492 ? PFERR_FETCH_MASK : 0; 5493 /* ept page table entry is present? */ 5494 error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK) 5495 ? PFERR_PRESENT_MASK : 0; 5496 5497 error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ? 5498 PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; 5499 5500 vcpu->arch.exit_qualification = exit_qualification; 5501 5502 /* 5503 * Check that the GPA doesn't exceed physical memory limits, as that is 5504 * a guest page fault. 
We have to emulate the instruction here, because 5505 * if the illegal address is that of a paging structure, then 5506 * EPT_VIOLATION_ACC_WRITE bit is set. Alternatively, if supported we 5507 * would also use advanced VM-exit information for EPT violations to 5508 * reconstruct the page fault error code. 5509 */ 5510 if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa))) 5511 return kvm_emulate_instruction(vcpu, 0); 5512 5513 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); 5514 } 5515 5516 static int handle_ept_misconfig(struct kvm_vcpu *vcpu) 5517 { 5518 gpa_t gpa; 5519 5520 if (!vmx_can_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0)) 5521 return 1; 5522 5523 /* 5524 * A nested guest cannot optimize MMIO vmexits, because we have an 5525 * nGPA here instead of the required GPA. 5526 */ 5527 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5528 if (!is_guest_mode(vcpu) && 5529 !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { 5530 trace_kvm_fast_mmio(gpa); 5531 return kvm_skip_emulated_instruction(vcpu); 5532 } 5533 5534 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); 5535 } 5536 5537 static int handle_nmi_window(struct kvm_vcpu *vcpu) 5538 { 5539 if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm)) 5540 return -EIO; 5541 5542 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); 5543 ++vcpu->stat.nmi_window_exits; 5544 kvm_make_request(KVM_REQ_EVENT, vcpu); 5545 5546 return 1; 5547 } 5548 5549 static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu) 5550 { 5551 struct vcpu_vmx *vmx = to_vmx(vcpu); 5552 5553 return vmx->emulation_required && !vmx->rmode.vm86_active && 5554 (vcpu->arch.exception.pending || vcpu->arch.exception.injected); 5555 } 5556 5557 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) 5558 { 5559 struct vcpu_vmx *vmx = to_vmx(vcpu); 5560 bool intr_window_requested; 5561 unsigned count = 130; 5562 5563 intr_window_requested = exec_controls_get(vmx) & 5564 CPU_BASED_INTR_WINDOW_EXITING; 5565 5566 while (vmx->emulation_required && count-- != 0) { 5567 if (intr_window_requested && !vmx_interrupt_blocked(vcpu)) 5568 return handle_interrupt_window(&vmx->vcpu); 5569 5570 if (kvm_test_request(KVM_REQ_EVENT, vcpu)) 5571 return 1; 5572 5573 if (!kvm_emulate_instruction(vcpu, 0)) 5574 return 0; 5575 5576 if (vmx_emulation_required_with_pending_exception(vcpu)) { 5577 kvm_prepare_emulation_failure_exit(vcpu); 5578 return 0; 5579 } 5580 5581 if (vcpu->arch.halt_request) { 5582 vcpu->arch.halt_request = 0; 5583 return kvm_emulate_halt_noskip(vcpu); 5584 } 5585 5586 /* 5587 * Note, return 1 and not 0, vcpu_run() will invoke 5588 * xfer_to_guest_mode() which will create a proper return 5589 * code. 
5590 */ 5591 if (__xfer_to_guest_mode_work_pending()) 5592 return 1; 5593 } 5594 5595 return 1; 5596 } 5597 5598 static int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu) 5599 { 5600 if (vmx_emulation_required_with_pending_exception(vcpu)) { 5601 kvm_prepare_emulation_failure_exit(vcpu); 5602 return 0; 5603 } 5604 5605 return 1; 5606 } 5607 5608 static void grow_ple_window(struct kvm_vcpu *vcpu) 5609 { 5610 struct vcpu_vmx *vmx = to_vmx(vcpu); 5611 unsigned int old = vmx->ple_window; 5612 5613 vmx->ple_window = __grow_ple_window(old, ple_window, 5614 ple_window_grow, 5615 ple_window_max); 5616 5617 if (vmx->ple_window != old) { 5618 vmx->ple_window_dirty = true; 5619 trace_kvm_ple_window_update(vcpu->vcpu_id, 5620 vmx->ple_window, old); 5621 } 5622 } 5623 5624 static void shrink_ple_window(struct kvm_vcpu *vcpu) 5625 { 5626 struct vcpu_vmx *vmx = to_vmx(vcpu); 5627 unsigned int old = vmx->ple_window; 5628 5629 vmx->ple_window = __shrink_ple_window(old, ple_window, 5630 ple_window_shrink, 5631 ple_window); 5632 5633 if (vmx->ple_window != old) { 5634 vmx->ple_window_dirty = true; 5635 trace_kvm_ple_window_update(vcpu->vcpu_id, 5636 vmx->ple_window, old); 5637 } 5638 } 5639 5640 /* 5641 * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE 5642 * exiting, so only get here on cpu with PAUSE-Loop-Exiting. 5643 */ 5644 static int handle_pause(struct kvm_vcpu *vcpu) 5645 { 5646 if (!kvm_pause_in_guest(vcpu->kvm)) 5647 grow_ple_window(vcpu); 5648 5649 /* 5650 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting" 5651 * VM-execution control is ignored if CPL > 0. OTOH, KVM 5652 * never set PAUSE_EXITING and just set PLE if supported, 5653 * so the vcpu must be CPL=0 if it gets a PAUSE exit. 5654 */ 5655 kvm_vcpu_on_spin(vcpu, true); 5656 return kvm_skip_emulated_instruction(vcpu); 5657 } 5658 5659 static int handle_monitor_trap(struct kvm_vcpu *vcpu) 5660 { 5661 return 1; 5662 } 5663 5664 static int handle_invpcid(struct kvm_vcpu *vcpu) 5665 { 5666 u32 vmx_instruction_info; 5667 unsigned long type; 5668 gva_t gva; 5669 struct { 5670 u64 pcid; 5671 u64 gla; 5672 } operand; 5673 int gpr_index; 5674 5675 if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { 5676 kvm_queue_exception(vcpu, UD_VECTOR); 5677 return 1; 5678 } 5679 5680 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5681 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info); 5682 type = kvm_register_read(vcpu, gpr_index); 5683 5684 /* According to the Intel instruction reference, the memory operand 5685 * is read even if it isn't needed (e.g., for type==all) 5686 */ 5687 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5688 vmx_instruction_info, false, 5689 sizeof(operand), &gva)) 5690 return 1; 5691 5692 return kvm_handle_invpcid(vcpu, type, gva); 5693 } 5694 5695 static int handle_pml_full(struct kvm_vcpu *vcpu) 5696 { 5697 unsigned long exit_qualification; 5698 5699 trace_kvm_pml_full(vcpu->vcpu_id); 5700 5701 exit_qualification = vmx_get_exit_qual(vcpu); 5702 5703 /* 5704 * PML buffer FULL happened while executing iret from NMI, 5705 * "blocked by NMI" bit has to be set before next VM entry. 5706 */ 5707 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 5708 enable_vnmi && 5709 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 5710 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 5711 GUEST_INTR_STATE_NMI); 5712 5713 /* 5714 * PML buffer already flushed at beginning of VMEXIT. Nothing to do 5715 * here.., and there's no userspace involvement needed for PML. 
5716 */ 5717 return 1; 5718 } 5719 5720 static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu) 5721 { 5722 struct vcpu_vmx *vmx = to_vmx(vcpu); 5723 5724 if (!vmx->req_immediate_exit && 5725 !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) { 5726 kvm_lapic_expired_hv_timer(vcpu); 5727 return EXIT_FASTPATH_REENTER_GUEST; 5728 } 5729 5730 return EXIT_FASTPATH_NONE; 5731 } 5732 5733 static int handle_preemption_timer(struct kvm_vcpu *vcpu) 5734 { 5735 handle_fastpath_preemption_timer(vcpu); 5736 return 1; 5737 } 5738 5739 /* 5740 * When nested=0, all VMX instruction VM Exits filter here. The handlers 5741 * are overwritten by nested_vmx_setup() when nested=1. 5742 */ 5743 static int handle_vmx_instruction(struct kvm_vcpu *vcpu) 5744 { 5745 kvm_queue_exception(vcpu, UD_VECTOR); 5746 return 1; 5747 } 5748 5749 #ifndef CONFIG_X86_SGX_KVM 5750 static int handle_encls(struct kvm_vcpu *vcpu) 5751 { 5752 /* 5753 * SGX virtualization is disabled. There is no software enable bit for 5754 * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent 5755 * the guest from executing ENCLS (when SGX is supported by hardware). 5756 */ 5757 kvm_queue_exception(vcpu, UD_VECTOR); 5758 return 1; 5759 } 5760 #endif /* CONFIG_X86_SGX_KVM */ 5761 5762 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu) 5763 { 5764 /* 5765 * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK 5766 * VM-Exits. Unconditionally set the flag here and leave the handling to 5767 * vmx_handle_exit(). 5768 */ 5769 to_vmx(vcpu)->exit_reason.bus_lock_detected = true; 5770 return 1; 5771 } 5772 5773 /* 5774 * The exit handlers return 1 if the exit was handled fully and guest execution 5775 * may resume. Otherwise they set the kvm_run parameter to indicate what needs 5776 * to be done to userspace and return 0. 
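* Exit reasons without a handler in the table below are reported to userspace as KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON by __vmx_handle_exit().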
5777 */ 5778 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { 5779 [EXIT_REASON_EXCEPTION_NMI] = handle_exception_nmi, 5780 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, 5781 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, 5782 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, 5783 [EXIT_REASON_IO_INSTRUCTION] = handle_io, 5784 [EXIT_REASON_CR_ACCESS] = handle_cr, 5785 [EXIT_REASON_DR_ACCESS] = handle_dr, 5786 [EXIT_REASON_CPUID] = kvm_emulate_cpuid, 5787 [EXIT_REASON_MSR_READ] = kvm_emulate_rdmsr, 5788 [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr, 5789 [EXIT_REASON_INTERRUPT_WINDOW] = handle_interrupt_window, 5790 [EXIT_REASON_HLT] = kvm_emulate_halt, 5791 [EXIT_REASON_INVD] = kvm_emulate_invd, 5792 [EXIT_REASON_INVLPG] = handle_invlpg, 5793 [EXIT_REASON_RDPMC] = kvm_emulate_rdpmc, 5794 [EXIT_REASON_VMCALL] = kvm_emulate_hypercall, 5795 [EXIT_REASON_VMCLEAR] = handle_vmx_instruction, 5796 [EXIT_REASON_VMLAUNCH] = handle_vmx_instruction, 5797 [EXIT_REASON_VMPTRLD] = handle_vmx_instruction, 5798 [EXIT_REASON_VMPTRST] = handle_vmx_instruction, 5799 [EXIT_REASON_VMREAD] = handle_vmx_instruction, 5800 [EXIT_REASON_VMRESUME] = handle_vmx_instruction, 5801 [EXIT_REASON_VMWRITE] = handle_vmx_instruction, 5802 [EXIT_REASON_VMOFF] = handle_vmx_instruction, 5803 [EXIT_REASON_VMON] = handle_vmx_instruction, 5804 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, 5805 [EXIT_REASON_APIC_ACCESS] = handle_apic_access, 5806 [EXIT_REASON_APIC_WRITE] = handle_apic_write, 5807 [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, 5808 [EXIT_REASON_WBINVD] = kvm_emulate_wbinvd, 5809 [EXIT_REASON_XSETBV] = kvm_emulate_xsetbv, 5810 [EXIT_REASON_TASK_SWITCH] = handle_task_switch, 5811 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, 5812 [EXIT_REASON_GDTR_IDTR] = handle_desc, 5813 [EXIT_REASON_LDTR_TR] = handle_desc, 5814 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, 5815 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, 5816 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, 5817 [EXIT_REASON_MWAIT_INSTRUCTION] = kvm_emulate_mwait, 5818 [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, 5819 [EXIT_REASON_MONITOR_INSTRUCTION] = kvm_emulate_monitor, 5820 [EXIT_REASON_INVEPT] = handle_vmx_instruction, 5821 [EXIT_REASON_INVVPID] = handle_vmx_instruction, 5822 [EXIT_REASON_RDRAND] = kvm_handle_invalid_op, 5823 [EXIT_REASON_RDSEED] = kvm_handle_invalid_op, 5824 [EXIT_REASON_PML_FULL] = handle_pml_full, 5825 [EXIT_REASON_INVPCID] = handle_invpcid, 5826 [EXIT_REASON_VMFUNC] = handle_vmx_instruction, 5827 [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, 5828 [EXIT_REASON_ENCLS] = handle_encls, 5829 [EXIT_REASON_BUS_LOCK] = handle_bus_lock_vmexit, 5830 }; 5831 5832 static const int kvm_vmx_max_exit_handlers = 5833 ARRAY_SIZE(kvm_vmx_exit_handlers); 5834 5835 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, 5836 u64 *info1, u64 *info2, 5837 u32 *intr_info, u32 *error_code) 5838 { 5839 struct vcpu_vmx *vmx = to_vmx(vcpu); 5840 5841 *reason = vmx->exit_reason.full; 5842 *info1 = vmx_get_exit_qual(vcpu); 5843 if (!(vmx->exit_reason.failed_vmentry)) { 5844 *info2 = vmx->idt_vectoring_info; 5845 *intr_info = vmx_get_intr_info(vcpu); 5846 if (is_exception_with_error_code(*intr_info)) 5847 *error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 5848 else 5849 *error_code = 0; 5850 } else { 5851 *info2 = 0; 5852 *intr_info = 0; 5853 *error_code = 0; 5854 } 5855 } 5856 5857 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) 5858 { 5859 if 
(vmx->pml_pg) { 5860 __free_page(vmx->pml_pg); 5861 vmx->pml_pg = NULL; 5862 } 5863 } 5864 5865 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) 5866 { 5867 struct vcpu_vmx *vmx = to_vmx(vcpu); 5868 u64 *pml_buf; 5869 u16 pml_idx; 5870 5871 pml_idx = vmcs_read16(GUEST_PML_INDEX); 5872 5873 /* Do nothing if PML buffer is empty */ 5874 if (pml_idx == (PML_ENTITY_NUM - 1)) 5875 return; 5876 5877 /* PML index always points to next available PML buffer entity */ 5878 if (pml_idx >= PML_ENTITY_NUM) 5879 pml_idx = 0; 5880 else 5881 pml_idx++; 5882 5883 pml_buf = page_address(vmx->pml_pg); 5884 for (; pml_idx < PML_ENTITY_NUM; pml_idx++) { 5885 u64 gpa; 5886 5887 gpa = pml_buf[pml_idx]; 5888 WARN_ON(gpa & (PAGE_SIZE - 1)); 5889 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); 5890 } 5891 5892 /* reset PML index */ 5893 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 5894 } 5895 5896 static void vmx_dump_sel(char *name, uint32_t sel) 5897 { 5898 pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", 5899 name, vmcs_read16(sel), 5900 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), 5901 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), 5902 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); 5903 } 5904 5905 static void vmx_dump_dtsel(char *name, uint32_t limit) 5906 { 5907 pr_err("%s limit=0x%08x, base=0x%016lx\n", 5908 name, vmcs_read32(limit), 5909 vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); 5910 } 5911 5912 static void vmx_dump_msrs(char *name, struct vmx_msrs *m) 5913 { 5914 unsigned int i; 5915 struct vmx_msr_entry *e; 5916 5917 pr_err("MSR %s:\n", name); 5918 for (i = 0, e = m->val; i < m->nr; ++i, ++e) 5919 pr_err(" %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value); 5920 } 5921 5922 void dump_vmcs(struct kvm_vcpu *vcpu) 5923 { 5924 struct vcpu_vmx *vmx = to_vmx(vcpu); 5925 u32 vmentry_ctl, vmexit_ctl; 5926 u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control; 5927 unsigned long cr4; 5928 int efer_slot; 5929 5930 if (!dump_invalid_vmcs) { 5931 pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n"); 5932 return; 5933 } 5934 5935 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); 5936 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); 5937 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 5938 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); 5939 cr4 = vmcs_readl(GUEST_CR4); 5940 secondary_exec_control = 0; 5941 if (cpu_has_secondary_exec_ctrls()) 5942 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); 5943 5944 pr_err("VMCS %p, last attempted VM-entry on CPU %d\n", 5945 vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu); 5946 pr_err("*** Guest State ***\n"); 5947 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 5948 vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), 5949 vmcs_readl(CR0_GUEST_HOST_MASK)); 5950 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 5951 cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); 5952 pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); 5953 if (cpu_has_vmx_ept()) { 5954 pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", 5955 vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1)); 5956 pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", 5957 vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3)); 5958 } 5959 pr_err("RSP = 0x%016lx RIP = 0x%016lx\n", 5960 vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP)); 5961 pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", 5962 vmcs_readl(GUEST_RFLAGS), 
vmcs_readl(GUEST_DR7)); 5963 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 5964 vmcs_readl(GUEST_SYSENTER_ESP), 5965 vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); 5966 vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); 5967 vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); 5968 vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); 5969 vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); 5970 vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); 5971 vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); 5972 vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); 5973 vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); 5974 vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); 5975 vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); 5976 efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER); 5977 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER) 5978 pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER)); 5979 else if (efer_slot >= 0) 5980 pr_err("EFER= 0x%016llx (autoload)\n", 5981 vmx->msr_autoload.guest.val[efer_slot].value); 5982 else if (vmentry_ctl & VM_ENTRY_IA32E_MODE) 5983 pr_err("EFER= 0x%016llx (effective)\n", 5984 vcpu->arch.efer | (EFER_LMA | EFER_LME)); 5985 else 5986 pr_err("EFER= 0x%016llx (effective)\n", 5987 vcpu->arch.efer & ~(EFER_LMA | EFER_LME)); 5988 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT) 5989 pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT)); 5990 pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", 5991 vmcs_read64(GUEST_IA32_DEBUGCTL), 5992 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); 5993 if (cpu_has_load_perf_global_ctrl() && 5994 vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) 5995 pr_err("PerfGlobCtl = 0x%016llx\n", 5996 vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); 5997 if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) 5998 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); 5999 pr_err("Interruptibility = %08x ActivityState = %08x\n", 6000 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), 6001 vmcs_read32(GUEST_ACTIVITY_STATE)); 6002 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 6003 pr_err("InterruptStatus = %04x\n", 6004 vmcs_read16(GUEST_INTR_STATUS)); 6005 if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0) 6006 vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest); 6007 if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0) 6008 vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest); 6009 6010 pr_err("*** Host State ***\n"); 6011 pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", 6012 vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); 6013 pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", 6014 vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), 6015 vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), 6016 vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), 6017 vmcs_read16(HOST_TR_SELECTOR)); 6018 pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", 6019 vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), 6020 vmcs_readl(HOST_TR_BASE)); 6021 pr_err("GDTBase=%016lx IDTBase=%016lx\n", 6022 vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); 6023 pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", 6024 vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), 6025 vmcs_readl(HOST_CR4)); 6026 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 6027 vmcs_readl(HOST_IA32_SYSENTER_ESP), 6028 vmcs_read32(HOST_IA32_SYSENTER_CS), 6029 vmcs_readl(HOST_IA32_SYSENTER_EIP)); 6030 if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER) 6031 pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER)); 6032 if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT) 6033 pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT)); 6034 if 
(cpu_has_load_perf_global_ctrl() && 6035 vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 6036 pr_err("PerfGlobCtl = 0x%016llx\n", 6037 vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); 6038 if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0) 6039 vmx_dump_msrs("host autoload", &vmx->msr_autoload.host); 6040 6041 pr_err("*** Control State ***\n"); 6042 pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", 6043 pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); 6044 pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); 6045 pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", 6046 vmcs_read32(EXCEPTION_BITMAP), 6047 vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), 6048 vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); 6049 pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", 6050 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 6051 vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), 6052 vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); 6053 pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", 6054 vmcs_read32(VM_EXIT_INTR_INFO), 6055 vmcs_read32(VM_EXIT_INTR_ERROR_CODE), 6056 vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); 6057 pr_err(" reason=%08x qualification=%016lx\n", 6058 vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION)); 6059 pr_err("IDTVectoring: info=%08x errcode=%08x\n", 6060 vmcs_read32(IDT_VECTORING_INFO_FIELD), 6061 vmcs_read32(IDT_VECTORING_ERROR_CODE)); 6062 pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET)); 6063 if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING) 6064 pr_err("TSC Multiplier = 0x%016llx\n", 6065 vmcs_read64(TSC_MULTIPLIER)); 6066 if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) { 6067 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) { 6068 u16 status = vmcs_read16(GUEST_INTR_STATUS); 6069 pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff); 6070 } 6071 pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD)); 6072 if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) 6073 pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR)); 6074 pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR)); 6075 } 6076 if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR) 6077 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV)); 6078 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)) 6079 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER)); 6080 if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) 6081 pr_err("PLE Gap=%08x Window=%08x\n", 6082 vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW)); 6083 if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID) 6084 pr_err("Virtual processor ID = 0x%04x\n", 6085 vmcs_read16(VIRTUAL_PROCESSOR_ID)); 6086 } 6087 6088 /* 6089 * The guest has exited. See if we can fix it or if we need userspace 6090 * assistance. 6091 */ 6092 static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) 6093 { 6094 struct vcpu_vmx *vmx = to_vmx(vcpu); 6095 union vmx_exit_reason exit_reason = vmx->exit_reason; 6096 u32 vectoring_info = vmx->idt_vectoring_info; 6097 u16 exit_handler_index; 6098 6099 /* 6100 * Flush logged GPAs PML buffer, this will make dirty_bitmap more 6101 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before 6102 * querying dirty_bitmap, we only need to kick all vcpus out of guest 6103 * mode as if vcpus is in root mode, the PML buffer must has been 6104 * flushed already. Note, PML is never enabled in hardware while 6105 * running L2. 
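* That is also why the flush below is skipped when the exit occurred while running L2 (guest mode).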
6106 */ 6107 if (enable_pml && !is_guest_mode(vcpu)) 6108 vmx_flush_pml_buffer(vcpu); 6109 6110 /* 6111 * KVM should never reach this point with a pending nested VM-Enter. 6112 * More specifically, short-circuiting VM-Entry to emulate L2 due to 6113 * invalid guest state should never happen as that means KVM knowingly 6114 * allowed a nested VM-Enter with an invalid vmcs12. More below. 6115 */ 6116 if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm)) 6117 return -EIO; 6118 6119 if (is_guest_mode(vcpu)) { 6120 /* 6121 * PML is never enabled when running L2, bail immediately if a 6122 * PML full exit occurs as something is horribly wrong. 6123 */ 6124 if (exit_reason.basic == EXIT_REASON_PML_FULL) 6125 goto unexpected_vmexit; 6126 6127 /* 6128 * The host physical addresses of some pages of guest memory 6129 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC 6130 * Page). The CPU may write to these pages via their host 6131 * physical address while L2 is running, bypassing any 6132 * address-translation-based dirty tracking (e.g. EPT write 6133 * protection). 6134 * 6135 * Mark them dirty on every exit from L2 to prevent them from 6136 * getting out of sync with dirty tracking. 6137 */ 6138 nested_mark_vmcs12_pages_dirty(vcpu); 6139 6140 /* 6141 * Synthesize a triple fault if L2 state is invalid. In normal 6142 * operation, nested VM-Enter rejects any attempt to enter L2 6143 * with invalid state. However, those checks are skipped if 6144 * state is being stuffed via RSM or KVM_SET_NESTED_STATE. If 6145 * L2 state is invalid, it means either L1 modified SMRAM state 6146 * or userspace provided bad state. Synthesize TRIPLE_FAULT as 6147 * doing so is architecturally allowed in the RSM case, and is 6148 * the least awful solution for the userspace case without 6149 * risking false positives. 6150 */ 6151 if (vmx->emulation_required) { 6152 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); 6153 return 1; 6154 } 6155 6156 if (nested_vmx_reflect_vmexit(vcpu)) 6157 return 1; 6158 } 6159 6160 /* If guest state is invalid, start emulating. L2 is handled above. */ 6161 if (vmx->emulation_required) 6162 return handle_invalid_guest_state(vcpu); 6163 6164 if (exit_reason.failed_vmentry) { 6165 dump_vmcs(vcpu); 6166 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; 6167 vcpu->run->fail_entry.hardware_entry_failure_reason 6168 = exit_reason.full; 6169 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; 6170 return 0; 6171 } 6172 6173 if (unlikely(vmx->fail)) { 6174 dump_vmcs(vcpu); 6175 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; 6176 vcpu->run->fail_entry.hardware_entry_failure_reason 6177 = vmcs_read32(VM_INSTRUCTION_ERROR); 6178 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; 6179 return 0; 6180 } 6181 6182 /* 6183 * Note: 6184 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it caused by 6185 * delivery event since it indicates guest is accessing MMIO. 6186 * The vm-exit can be triggered again after return to guest that 6187 * will cause infinite loop. 
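* (The exit reasons excluded below -- exception/NMI, EPT violation, PML full, APIC access and task switch -- are the ones KVM handles correctly even with the original event delivery still pending.)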
6188 */ 6189 if ((vectoring_info & VECTORING_INFO_VALID_MASK) && 6190 (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI && 6191 exit_reason.basic != EXIT_REASON_EPT_VIOLATION && 6192 exit_reason.basic != EXIT_REASON_PML_FULL && 6193 exit_reason.basic != EXIT_REASON_APIC_ACCESS && 6194 exit_reason.basic != EXIT_REASON_TASK_SWITCH)) { 6195 int ndata = 3; 6196 6197 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 6198 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; 6199 vcpu->run->internal.data[0] = vectoring_info; 6200 vcpu->run->internal.data[1] = exit_reason.full; 6201 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification; 6202 if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) { 6203 vcpu->run->internal.data[ndata++] = 6204 vmcs_read64(GUEST_PHYSICAL_ADDRESS); 6205 } 6206 vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu; 6207 vcpu->run->internal.ndata = ndata; 6208 return 0; 6209 } 6210 6211 if (unlikely(!enable_vnmi && 6212 vmx->loaded_vmcs->soft_vnmi_blocked)) { 6213 if (!vmx_interrupt_blocked(vcpu)) { 6214 vmx->loaded_vmcs->soft_vnmi_blocked = 0; 6215 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && 6216 vcpu->arch.nmi_pending) { 6217 /* 6218 * This CPU doesn't let us detect the end of an 6219 * NMI-blocked window if the guest runs with IRQs 6220 * disabled, so pull the trigger after 1 s of 6221 * futile waiting and inform the user about it. 6222 */ 6223 printk(KERN_WARNING "%s: Breaking out of NMI-blocked " 6224 "state on VCPU %d after 1 s timeout\n", 6225 __func__, vcpu->vcpu_id); 6226 vmx->loaded_vmcs->soft_vnmi_blocked = 0; 6227 } 6228 } 6229 6230 if (exit_fastpath != EXIT_FASTPATH_NONE) 6231 return 1; 6232 6233 if (exit_reason.basic >= kvm_vmx_max_exit_handlers) 6234 goto unexpected_vmexit; 6235 #ifdef CONFIG_RETPOLINE 6236 if (exit_reason.basic == EXIT_REASON_MSR_WRITE) 6237 return kvm_emulate_wrmsr(vcpu); 6238 else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER) 6239 return handle_preemption_timer(vcpu); 6240 else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW) 6241 return handle_interrupt_window(vcpu); 6242 else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) 6243 return handle_external_interrupt(vcpu); 6244 else if (exit_reason.basic == EXIT_REASON_HLT) 6245 return kvm_emulate_halt(vcpu); 6246 else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) 6247 return handle_ept_misconfig(vcpu); 6248 #endif 6249 6250 exit_handler_index = array_index_nospec((u16)exit_reason.basic, 6251 kvm_vmx_max_exit_handlers); 6252 if (!kvm_vmx_exit_handlers[exit_handler_index]) 6253 goto unexpected_vmexit; 6254 6255 return kvm_vmx_exit_handlers[exit_handler_index](vcpu); 6256 6257 unexpected_vmexit: 6258 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", 6259 exit_reason.full); 6260 dump_vmcs(vcpu); 6261 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 6262 vcpu->run->internal.suberror = 6263 KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; 6264 vcpu->run->internal.ndata = 2; 6265 vcpu->run->internal.data[0] = exit_reason.full; 6266 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; 6267 return 0; 6268 } 6269 6270 static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) 6271 { 6272 int ret = __vmx_handle_exit(vcpu, exit_fastpath); 6273 6274 /* 6275 * Exit to user space when a bus lock is detected to inform userspace that there is 6276 * a bus lock in guest.
6277 */ 6278 if (to_vmx(vcpu)->exit_reason.bus_lock_detected) { 6279 if (ret > 0) 6280 vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; 6281 6282 vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; 6283 return 0; 6284 } 6285 return ret; 6286 } 6287 6288 /* 6289 * Software based L1D cache flush which is used when microcode providing 6290 * the cache control MSR is not loaded. 6291 * 6292 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to 6293 * flush it is required to read in 64 KiB because the replacement algorithm 6294 * is not exactly LRU. This could be sized at runtime via topology 6295 * information but as all relevant affected CPUs have 32KiB L1D cache size 6296 * there is no point in doing so. 6297 */ 6298 static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu) 6299 { 6300 int size = PAGE_SIZE << L1D_CACHE_ORDER; 6301 6302 /* 6303 * This code is only executed when the flush mode is 'cond' or 6304 * 'always' 6305 */ 6306 if (static_branch_likely(&vmx_l1d_flush_cond)) { 6307 bool flush_l1d; 6308 6309 /* 6310 * Clear the per-vcpu flush bit, it gets set again 6311 * either from vcpu_run() or from one of the unsafe 6312 * VMEXIT handlers. 6313 */ 6314 flush_l1d = vcpu->arch.l1tf_flush_l1d; 6315 vcpu->arch.l1tf_flush_l1d = false; 6316 6317 /* 6318 * Clear the per-cpu flush bit, it gets set again from 6319 * the interrupt handlers. 6320 */ 6321 flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); 6322 kvm_clear_cpu_l1tf_flush_l1d(); 6323 6324 if (!flush_l1d) 6325 return; 6326 } 6327 6328 vcpu->stat.l1d_flush++; 6329 6330 if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { 6331 native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); 6332 return; 6333 } 6334 6335 asm volatile( 6336 /* First ensure the pages are in the TLB */ 6337 "xorl %%eax, %%eax\n" 6338 ".Lpopulate_tlb:\n\t" 6339 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" 6340 "addl $4096, %%eax\n\t" 6341 "cmpl %%eax, %[size]\n\t" 6342 "jne .Lpopulate_tlb\n\t" 6343 "xorl %%eax, %%eax\n\t" 6344 "cpuid\n\t" 6345 /* Now fill the cache */ 6346 "xorl %%eax, %%eax\n" 6347 ".Lfill_cache:\n" 6348 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" 6349 "addl $64, %%eax\n\t" 6350 "cmpl %%eax, %[size]\n\t" 6351 "jne .Lfill_cache\n\t" 6352 "lfence\n" 6353 :: [flush_pages] "r" (vmx_l1d_flush_pages), 6354 [size] "r" (size) 6355 : "eax", "ebx", "ecx", "edx"); 6356 } 6357 6358 static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) 6359 { 6360 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6361 int tpr_threshold; 6362 6363 if (is_guest_mode(vcpu) && 6364 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) 6365 return; 6366 6367 tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr; 6368 if (is_guest_mode(vcpu)) 6369 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold; 6370 else 6371 vmcs_write32(TPR_THRESHOLD, tpr_threshold); 6372 } 6373 6374 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) 6375 { 6376 struct vcpu_vmx *vmx = to_vmx(vcpu); 6377 u32 sec_exec_control; 6378 6379 if (!lapic_in_kernel(vcpu)) 6380 return; 6381 6382 if (!flexpriority_enabled && 6383 !cpu_has_vmx_virtualize_x2apic_mode()) 6384 return; 6385 6386 /* Postpone execution until vmcs01 is the current VMCS. 
*/ 6387 if (is_guest_mode(vcpu)) { 6388 vmx->nested.change_vmcs01_virtual_apic_mode = true; 6389 return; 6390 } 6391 6392 sec_exec_control = secondary_exec_controls_get(vmx); 6393 sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 6394 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); 6395 6396 switch (kvm_get_apic_mode(vcpu)) { 6397 case LAPIC_MODE_INVALID: 6398 WARN_ONCE(true, "Invalid local APIC state"); 6399 break; 6400 case LAPIC_MODE_DISABLED: 6401 break; 6402 case LAPIC_MODE_XAPIC: 6403 if (flexpriority_enabled) { 6404 sec_exec_control |= 6405 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 6406 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 6407 6408 /* 6409 * Flush the TLB, reloading the APIC access page will 6410 * only do so if its physical address has changed, but 6411 * the guest may have inserted a non-APIC mapping into 6412 * the TLB while the APIC access page was disabled. 6413 */ 6414 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 6415 } 6416 break; 6417 case LAPIC_MODE_X2APIC: 6418 if (cpu_has_vmx_virtualize_x2apic_mode()) 6419 sec_exec_control |= 6420 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 6421 break; 6422 } 6423 secondary_exec_controls_set(vmx, sec_exec_control); 6424 6425 vmx_update_msr_bitmap_x2apic(vcpu); 6426 } 6427 6428 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu) 6429 { 6430 struct page *page; 6431 6432 /* Defer reload until vmcs01 is the current VMCS. */ 6433 if (is_guest_mode(vcpu)) { 6434 to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true; 6435 return; 6436 } 6437 6438 if (!(secondary_exec_controls_get(to_vmx(vcpu)) & 6439 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 6440 return; 6441 6442 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 6443 if (is_error_page(page)) 6444 return; 6445 6446 vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(page)); 6447 vmx_flush_tlb_current(vcpu); 6448 6449 /* 6450 * Do not pin apic access page in memory, the MMU notifier 6451 * will call us again if it is migrated or swapped out. 6452 */ 6453 put_page(page); 6454 } 6455 6456 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) 6457 { 6458 u16 status; 6459 u8 old; 6460 6461 if (max_isr == -1) 6462 max_isr = 0; 6463 6464 status = vmcs_read16(GUEST_INTR_STATUS); 6465 old = status >> 8; 6466 if (max_isr != old) { 6467 status &= 0xff; 6468 status |= max_isr << 8; 6469 vmcs_write16(GUEST_INTR_STATUS, status); 6470 } 6471 } 6472 6473 static void vmx_set_rvi(int vector) 6474 { 6475 u16 status; 6476 u8 old; 6477 6478 if (vector == -1) 6479 vector = 0; 6480 6481 status = vmcs_read16(GUEST_INTR_STATUS); 6482 old = (u8)status & 0xff; 6483 if ((u8)vector != old) { 6484 status &= ~0xff; 6485 status |= (u8)vector; 6486 vmcs_write16(GUEST_INTR_STATUS, status); 6487 } 6488 } 6489 6490 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) 6491 { 6492 /* 6493 * When running L2, updating RVI is only relevant when 6494 * vmcs12 virtual-interrupt-delivery enabled. 6495 * However, it can be enabled only when L1 also 6496 * intercepts external-interrupts and in that case 6497 * we should not update vmcs02 RVI but instead intercept 6498 * interrupt. Therefore, do nothing when running L2. 
6499 */ 6500 if (!is_guest_mode(vcpu)) 6501 vmx_set_rvi(max_irr); 6502 } 6503 6504 static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) 6505 { 6506 struct vcpu_vmx *vmx = to_vmx(vcpu); 6507 int max_irr; 6508 bool got_posted_interrupt; 6509 6510 if (KVM_BUG_ON(!enable_apicv, vcpu->kvm)) 6511 return -EIO; 6512 6513 if (pi_test_on(&vmx->pi_desc)) { 6514 pi_clear_on(&vmx->pi_desc); 6515 /* 6516 * IOMMU can write to PID.ON, so the barrier matters even on UP. 6517 * But on x86 this is just a compiler barrier anyway. 6518 */ 6519 smp_mb__after_atomic(); 6520 got_posted_interrupt = 6521 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); 6522 } else { 6523 max_irr = kvm_lapic_find_highest_irr(vcpu); 6524 got_posted_interrupt = false; 6525 } 6526 6527 /* 6528 * Newly recognized interrupts are injected via either virtual interrupt 6529 * delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is 6530 * disabled in two cases: 6531 * 6532 * 1) If L2 is running and the vCPU has a new pending interrupt. If L1 6533 * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a 6534 * VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected 6535 * into L2, but KVM doesn't use virtual interrupt delivery to inject 6536 * interrupts into L2, and so KVM_REQ_EVENT is again needed. 6537 * 6538 * 2) If APICv is disabled for this vCPU, assigned devices may still 6539 * attempt to post interrupts. The posted interrupt vector will cause 6540 * a VM-Exit and the subsequent entry will call sync_pir_to_irr. 6541 */ 6542 if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu)) 6543 vmx_set_rvi(max_irr); 6544 else if (got_posted_interrupt) 6545 kvm_make_request(KVM_REQ_EVENT, vcpu); 6546 6547 return max_irr; 6548 } 6549 6550 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) 6551 { 6552 if (!kvm_vcpu_apicv_active(vcpu)) 6553 return; 6554 6555 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); 6556 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); 6557 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); 6558 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); 6559 } 6560 6561 static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) 6562 { 6563 struct vcpu_vmx *vmx = to_vmx(vcpu); 6564 6565 pi_clear_on(&vmx->pi_desc); 6566 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); 6567 } 6568 6569 void vmx_do_interrupt_nmi_irqoff(unsigned long entry); 6570 6571 static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, 6572 unsigned long entry) 6573 { 6574 bool is_nmi = entry == (unsigned long)asm_exc_nmi_noist; 6575 6576 kvm_before_interrupt(vcpu, is_nmi ? KVM_HANDLING_NMI : KVM_HANDLING_IRQ); 6577 vmx_do_interrupt_nmi_irqoff(entry); 6578 kvm_after_interrupt(vcpu); 6579 } 6580 6581 static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu) 6582 { 6583 /* 6584 * Save xfd_err to guest_fpu before interrupt is enabled, so the 6585 * MSR value is not clobbered by the host activity before the guest 6586 * has chance to consume it. 6587 * 6588 * Do not blindly read xfd_err here, since this exception might 6589 * be caused by L1 interception on a platform which doesn't 6590 * support xfd at all. 6591 * 6592 * Do it conditionally upon guest_fpu::xfd. xfd_err matters 6593 * only when xfd contains a non-zero value. 6594 * 6595 * Queuing exception is done in vmx_handle_exit. See comment there. 
6596 */ 6597 if (vcpu->arch.guest_fpu.fpstate->xfd) 6598 rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); 6599 } 6600 6601 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx) 6602 { 6603 const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist; 6604 u32 intr_info = vmx_get_intr_info(&vmx->vcpu); 6605 6606 /* if exit due to PF check for async PF */ 6607 if (is_page_fault(intr_info)) 6608 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags(); 6609 /* if exit due to NM, handle before interrupts are enabled */ 6610 else if (is_nm_fault(intr_info)) 6611 handle_nm_fault_irqoff(&vmx->vcpu); 6612 /* Handle machine checks before interrupts are enabled */ 6613 else if (is_machine_check(intr_info)) 6614 kvm_machine_check(); 6615 /* We need to handle NMIs before interrupts are enabled */ 6616 else if (is_nmi(intr_info)) 6617 handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry); 6618 } 6619 6620 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu) 6621 { 6622 u32 intr_info = vmx_get_intr_info(vcpu); 6623 unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK; 6624 gate_desc *desc = (gate_desc *)host_idt_base + vector; 6625 6626 if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm, 6627 "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info)) 6628 return; 6629 6630 handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc)); 6631 vcpu->arch.at_instruction_boundary = true; 6632 } 6633 6634 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu) 6635 { 6636 struct vcpu_vmx *vmx = to_vmx(vcpu); 6637 6638 if (vmx->emulation_required) 6639 return; 6640 6641 if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) 6642 handle_external_interrupt_irqoff(vcpu); 6643 else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI) 6644 handle_exception_nmi_irqoff(vmx); 6645 } 6646 6647 /* 6648 * The kvm parameter can be NULL (module initialization, or invocation before 6649 * VM creation). Be sure to check the kvm parameter before using it. 6650 */ 6651 static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index) 6652 { 6653 switch (index) { 6654 case MSR_IA32_SMBASE: 6655 /* 6656 * We cannot do SMM unless we can run the guest in big 6657 * real mode. 6658 */ 6659 return enable_unrestricted_guest || emulate_invalid_guest_state; 6660 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 6661 return nested; 6662 case MSR_AMD64_VIRT_SPEC_CTRL: 6663 case MSR_AMD64_TSC_RATIO: 6664 /* This is AMD only. */ 6665 return false; 6666 default: 6667 return true; 6668 } 6669 } 6670 6671 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) 6672 { 6673 u32 exit_intr_info; 6674 bool unblock_nmi; 6675 u8 vector; 6676 bool idtv_info_valid; 6677 6678 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; 6679 6680 if (enable_vnmi) { 6681 if (vmx->loaded_vmcs->nmi_known_unmasked) 6682 return; 6683 6684 exit_intr_info = vmx_get_intr_info(&vmx->vcpu); 6685 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; 6686 vector = exit_intr_info & INTR_INFO_VECTOR_MASK; 6687 /* 6688 * SDM 3: 27.7.1.2 (September 2008) 6689 * Re-set bit "block by NMI" before VM entry if vmexit caused by 6690 * a guest IRET fault. 6691 * SDM 3: 23.2.2 (September 2008) 6692 * Bit 12 is undefined in any of the following cases: 6693 * If the VM exit sets the valid bit in the IDT-vectoring 6694 * information field. 6695 * If the VM exit is due to a double fault. 
6696 */ 6697 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && 6698 vector != DF_VECTOR && !idtv_info_valid) 6699 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 6700 GUEST_INTR_STATE_NMI); 6701 else 6702 vmx->loaded_vmcs->nmi_known_unmasked = 6703 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) 6704 & GUEST_INTR_STATE_NMI); 6705 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) 6706 vmx->loaded_vmcs->vnmi_blocked_time += 6707 ktime_to_ns(ktime_sub(ktime_get(), 6708 vmx->loaded_vmcs->entry_time)); 6709 } 6710 6711 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, 6712 u32 idt_vectoring_info, 6713 int instr_len_field, 6714 int error_code_field) 6715 { 6716 u8 vector; 6717 int type; 6718 bool idtv_info_valid; 6719 6720 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; 6721 6722 vcpu->arch.nmi_injected = false; 6723 kvm_clear_exception_queue(vcpu); 6724 kvm_clear_interrupt_queue(vcpu); 6725 6726 if (!idtv_info_valid) 6727 return; 6728 6729 kvm_make_request(KVM_REQ_EVENT, vcpu); 6730 6731 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; 6732 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; 6733 6734 switch (type) { 6735 case INTR_TYPE_NMI_INTR: 6736 vcpu->arch.nmi_injected = true; 6737 /* 6738 * SDM 3: 27.7.1.2 (September 2008) 6739 * Clear bit "block by NMI" before VM entry if a NMI 6740 * delivery faulted. 6741 */ 6742 vmx_set_nmi_mask(vcpu, false); 6743 break; 6744 case INTR_TYPE_SOFT_EXCEPTION: 6745 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 6746 fallthrough; 6747 case INTR_TYPE_HARD_EXCEPTION: 6748 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { 6749 u32 err = vmcs_read32(error_code_field); 6750 kvm_requeue_exception_e(vcpu, vector, err); 6751 } else 6752 kvm_requeue_exception(vcpu, vector); 6753 break; 6754 case INTR_TYPE_SOFT_INTR: 6755 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 6756 fallthrough; 6757 case INTR_TYPE_EXT_INTR: 6758 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); 6759 break; 6760 default: 6761 break; 6762 } 6763 } 6764 6765 static void vmx_complete_interrupts(struct vcpu_vmx *vmx) 6766 { 6767 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, 6768 VM_EXIT_INSTRUCTION_LEN, 6769 IDT_VECTORING_ERROR_CODE); 6770 } 6771 6772 static void vmx_cancel_injection(struct kvm_vcpu *vcpu) 6773 { 6774 __vmx_complete_interrupts(vcpu, 6775 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 6776 VM_ENTRY_INSTRUCTION_LEN, 6777 VM_ENTRY_EXCEPTION_ERROR_CODE); 6778 6779 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 6780 } 6781 6782 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) 6783 { 6784 int i, nr_msrs; 6785 struct perf_guest_switch_msr *msrs; 6786 6787 /* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. 
*/ 6788 msrs = perf_guest_get_msrs(&nr_msrs); 6789 if (!msrs) 6790 return; 6791 6792 for (i = 0; i < nr_msrs; i++) 6793 if (msrs[i].host == msrs[i].guest) 6794 clear_atomic_switch_msr(vmx, msrs[i].msr); 6795 else 6796 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, 6797 msrs[i].host, false); 6798 } 6799 6800 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) 6801 { 6802 struct vcpu_vmx *vmx = to_vmx(vcpu); 6803 u64 tscl; 6804 u32 delta_tsc; 6805 6806 if (vmx->req_immediate_exit) { 6807 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0); 6808 vmx->loaded_vmcs->hv_timer_soft_disabled = false; 6809 } else if (vmx->hv_deadline_tsc != -1) { 6810 tscl = rdtsc(); 6811 if (vmx->hv_deadline_tsc > tscl) 6812 /* set_hv_timer ensures the delta fits in 32-bits */ 6813 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> 6814 cpu_preemption_timer_multi); 6815 else 6816 delta_tsc = 0; 6817 6818 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); 6819 vmx->loaded_vmcs->hv_timer_soft_disabled = false; 6820 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) { 6821 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1); 6822 vmx->loaded_vmcs->hv_timer_soft_disabled = true; 6823 } 6824 } 6825 6826 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) 6827 { 6828 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) { 6829 vmx->loaded_vmcs->host_state.rsp = host_rsp; 6830 vmcs_writel(HOST_RSP, host_rsp); 6831 } 6832 } 6833 6834 void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, 6835 unsigned int flags) 6836 { 6837 u64 hostval = this_cpu_read(x86_spec_ctrl_current); 6838 6839 if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) 6840 return; 6841 6842 if (flags & VMX_RUN_SAVE_SPEC_CTRL) 6843 vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL); 6844 6845 /* 6846 * If the guest/host SPEC_CTRL values differ, restore the host value. 6847 * 6848 * For legacy IBRS, the IBRS bit always needs to be written after 6849 * transitioning from a less privileged predictor mode, regardless of 6850 * whether the guest/host values differ. 
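* That is why the write below happens whenever X86_FEATURE_KERNEL_IBRS is enabled, even if the guest and host values match.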
6851 */ 6852 if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) || 6853 vmx->spec_ctrl != hostval) 6854 native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval); 6855 6856 barrier_nospec(); 6857 } 6858 6859 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) 6860 { 6861 switch (to_vmx(vcpu)->exit_reason.basic) { 6862 case EXIT_REASON_MSR_WRITE: 6863 return handle_fastpath_set_msr_irqoff(vcpu); 6864 case EXIT_REASON_PREEMPTION_TIMER: 6865 return handle_fastpath_preemption_timer(vcpu); 6866 default: 6867 return EXIT_FASTPATH_NONE; 6868 } 6869 } 6870 6871 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, 6872 struct vcpu_vmx *vmx, 6873 unsigned long flags) 6874 { 6875 guest_state_enter_irqoff(); 6876 6877 /* L1D Flush includes CPU buffer clear to mitigate MDS */ 6878 if (static_branch_unlikely(&vmx_l1d_should_flush)) 6879 vmx_l1d_flush(vcpu); 6880 else if (static_branch_unlikely(&mds_user_clear)) 6881 mds_clear_cpu_buffers(); 6882 else if (static_branch_unlikely(&mmio_stale_data_clear) && 6883 kvm_arch_has_assigned_device(vcpu->kvm)) 6884 mds_clear_cpu_buffers(); 6885 6886 vmx_disable_fb_clear(vmx); 6887 6888 if (vcpu->arch.cr2 != native_read_cr2()) 6889 native_write_cr2(vcpu->arch.cr2); 6890 6891 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 6892 flags); 6893 6894 vcpu->arch.cr2 = native_read_cr2(); 6895 6896 vmx_enable_fb_clear(vmx); 6897 6898 guest_state_exit_irqoff(); 6899 } 6900 6901 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) 6902 { 6903 struct vcpu_vmx *vmx = to_vmx(vcpu); 6904 unsigned long cr3, cr4; 6905 6906 /* Record the guest's net vcpu time for enforced NMI injections. */ 6907 if (unlikely(!enable_vnmi && 6908 vmx->loaded_vmcs->soft_vnmi_blocked)) 6909 vmx->loaded_vmcs->entry_time = ktime_get(); 6910 6911 /* 6912 * Don't enter VMX if guest state is invalid, let the exit handler 6913 * start emulation until we arrive back to a valid state. Synthesize a 6914 * consistency check VM-Exit due to invalid guest state and bail. 6915 */ 6916 if (unlikely(vmx->emulation_required)) { 6917 vmx->fail = 0; 6918 6919 vmx->exit_reason.full = EXIT_REASON_INVALID_STATE; 6920 vmx->exit_reason.failed_vmentry = 1; 6921 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1); 6922 vmx->exit_qualification = ENTRY_FAIL_DEFAULT; 6923 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2); 6924 vmx->exit_intr_info = 0; 6925 return EXIT_FASTPATH_NONE; 6926 } 6927 6928 trace_kvm_entry(vcpu); 6929 6930 if (vmx->ple_window_dirty) { 6931 vmx->ple_window_dirty = false; 6932 vmcs_write32(PLE_WINDOW, vmx->ple_window); 6933 } 6934 6935 /* 6936 * We did this in prepare_switch_to_guest, because it needs to 6937 * be within srcu_read_lock. 6938 */ 6939 WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync); 6940 6941 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP)) 6942 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); 6943 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP)) 6944 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); 6945 vcpu->arch.regs_dirty = 0; 6946 6947 /* 6948 * Refresh vmcs.HOST_CR3 if necessary. This must be done immediately 6949 * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time 6950 * it switches back to the current->mm, which can occur in KVM context 6951 * when switching to a temporary mm to patch kernel code, e.g. if KVM 6952 * toggles a static key while handling a VM-Exit. 
6953 */ 6954 cr3 = __get_current_cr3_fast(); 6955 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 6956 vmcs_writel(HOST_CR3, cr3); 6957 vmx->loaded_vmcs->host_state.cr3 = cr3; 6958 } 6959 6960 cr4 = cr4_read_shadow(); 6961 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 6962 vmcs_writel(HOST_CR4, cr4); 6963 vmx->loaded_vmcs->host_state.cr4 = cr4; 6964 } 6965 6966 /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */ 6967 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) 6968 set_debugreg(vcpu->arch.dr6, 6); 6969 6970 /* When single-stepping over STI and MOV SS, we must clear the 6971 * corresponding interruptibility bits in the guest state. Otherwise 6972 * VM entry fails because it then expects bit 14 (BS) to be set in the 6973 * pending debug exceptions field, which is not correct for the guest 6974 * debugging case. */ 6975 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 6976 vmx_set_interrupt_shadow(vcpu, 0); 6977 6978 kvm_load_guest_xsave_state(vcpu); 6979 6980 pt_guest_enter(vmx); 6981 6982 atomic_switch_perf_msrs(vmx); 6983 if (intel_pmu_lbr_is_enabled(vcpu)) 6984 vmx_passthrough_lbr_msrs(vcpu); 6985 6986 if (enable_preemption_timer) 6987 vmx_update_hv_timer(vcpu); 6988 6989 kvm_wait_lapic_expire(vcpu); 6990 6991 /* The actual VMENTER/EXIT is in the .noinstr.text section. */ 6992 vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx)); 6993 6994 /* All fields are clean at this point */ 6995 if (static_branch_unlikely(&enable_evmcs)) { 6996 current_evmcs->hv_clean_fields |= 6997 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 6998 6999 current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu); 7000 } 7001 7002 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ 7003 if (vmx->host_debugctlmsr) 7004 update_debugctlmsr(vmx->host_debugctlmsr); 7005 7006 #ifndef CONFIG_X86_64 7007 /* 7008 * The sysexit path does not restore ds/es, so we must set them to 7009 * a reasonable value ourselves. 7010 * 7011 * We can't defer this to vmx_prepare_switch_to_host() since that 7012 * function may be executed in interrupt context, which saves and 7013 * restores segments around it, nullifying its effect. 7014 */ 7015 loadsegment(ds, __USER_DS); 7016 loadsegment(es, __USER_DS); 7017 #endif 7018 7019 vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET; 7020 7021 pt_guest_exit(vmx); 7022 7023 kvm_load_host_xsave_state(vcpu); 7024 7025 if (is_guest_mode(vcpu)) { 7026 /* 7027 * Track VMLAUNCH/VMRESUME that have made it past guest state 7028 * checking.
7029 */ 7030 if (vmx->nested.nested_run_pending && 7031 !vmx->exit_reason.failed_vmentry) 7032 ++vcpu->stat.nested_run; 7033 7034 vmx->nested.nested_run_pending = 0; 7035 } 7036 7037 vmx->idt_vectoring_info = 0; 7038 7039 if (unlikely(vmx->fail)) { 7040 vmx->exit_reason.full = 0xdead; 7041 return EXIT_FASTPATH_NONE; 7042 } 7043 7044 vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON); 7045 if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY)) 7046 kvm_machine_check(); 7047 7048 if (likely(!vmx->exit_reason.failed_vmentry)) 7049 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 7050 7051 trace_kvm_exit(vcpu, KVM_ISA_VMX); 7052 7053 if (unlikely(vmx->exit_reason.failed_vmentry)) 7054 return EXIT_FASTPATH_NONE; 7055 7056 vmx->loaded_vmcs->launched = 1; 7057 7058 vmx_recover_nmi_blocking(vmx); 7059 vmx_complete_interrupts(vmx); 7060 7061 if (is_guest_mode(vcpu)) 7062 return EXIT_FASTPATH_NONE; 7063 7064 return vmx_exit_handlers_fastpath(vcpu); 7065 } 7066 7067 static void vmx_vcpu_free(struct kvm_vcpu *vcpu) 7068 { 7069 struct vcpu_vmx *vmx = to_vmx(vcpu); 7070 7071 if (enable_pml) 7072 vmx_destroy_pml_buffer(vmx); 7073 free_vpid(vmx->vpid); 7074 nested_vmx_free_vcpu(vcpu); 7075 free_loaded_vmcs(vmx->loaded_vmcs); 7076 } 7077 7078 static int vmx_vcpu_create(struct kvm_vcpu *vcpu) 7079 { 7080 struct vmx_uret_msr *tsx_ctrl; 7081 struct vcpu_vmx *vmx; 7082 int i, err; 7083 7084 BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0); 7085 vmx = to_vmx(vcpu); 7086 7087 INIT_LIST_HEAD(&vmx->pi_wakeup_list); 7088 7089 err = -ENOMEM; 7090 7091 vmx->vpid = allocate_vpid(); 7092 7093 /* 7094 * If PML is turned on, failure on enabling PML just results in failure 7095 * of creating the vcpu, therefore we can simplify PML logic (by 7096 * avoiding dealing with cases, such as enabling PML partially on vcpus 7097 * for the guest), etc. 7098 */ 7099 if (enable_pml) { 7100 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 7101 if (!vmx->pml_pg) 7102 goto free_vpid; 7103 } 7104 7105 for (i = 0; i < kvm_nr_uret_msrs; ++i) 7106 vmx->guest_uret_msrs[i].mask = -1ull; 7107 if (boot_cpu_has(X86_FEATURE_RTM)) { 7108 /* 7109 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception. 7110 * Keep the host value unchanged to avoid changing CPUID bits 7111 * under the host kernel's feet. 7112 */ 7113 tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL); 7114 if (tsx_ctrl) 7115 tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR; 7116 } 7117 7118 err = alloc_loaded_vmcs(&vmx->vmcs01); 7119 if (err < 0) 7120 goto free_pml; 7121 7122 /* 7123 * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a 7124 * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the 7125 * feature only for vmcs01, KVM currently isn't equipped to realize any 7126 * performance benefits from enabling it for vmcs02. 
7127 */ 7128 if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) && 7129 (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { 7130 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs; 7131 7132 evmcs->hv_enlightenments_control.msr_bitmap = 1; 7133 } 7134 7135 /* The MSR bitmap starts with all ones */ 7136 bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS); 7137 bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS); 7138 7139 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R); 7140 #ifdef CONFIG_X86_64 7141 vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW); 7142 vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW); 7143 vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); 7144 #endif 7145 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); 7146 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); 7147 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); 7148 if (kvm_cstate_in_guest(vcpu->kvm)) { 7149 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R); 7150 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R); 7151 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R); 7152 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R); 7153 } 7154 7155 vmx->loaded_vmcs = &vmx->vmcs01; 7156 7157 if (cpu_need_virtualize_apic_accesses(vcpu)) { 7158 err = alloc_apic_access_page(vcpu->kvm); 7159 if (err) 7160 goto free_vmcs; 7161 } 7162 7163 if (enable_ept && !enable_unrestricted_guest) { 7164 err = init_rmode_identity_map(vcpu->kvm); 7165 if (err) 7166 goto free_vmcs; 7167 } 7168 7169 return 0; 7170 7171 free_vmcs: 7172 free_loaded_vmcs(vmx->loaded_vmcs); 7173 free_pml: 7174 vmx_destroy_pml_buffer(vmx); 7175 free_vpid: 7176 free_vpid(vmx->vpid); 7177 return err; 7178 } 7179 7180 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" 7181 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" 7182 7183 static int vmx_vm_init(struct kvm *kvm) 7184 { 7185 if (!ple_gap) 7186 kvm->arch.pause_in_guest = true; 7187 7188 if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { 7189 switch (l1tf_mitigation) { 7190 case L1TF_MITIGATION_OFF: 7191 case L1TF_MITIGATION_FLUSH_NOWARN: 7192 /* 'I explicitly don't care' is set */ 7193 break; 7194 case L1TF_MITIGATION_FLUSH: 7195 case L1TF_MITIGATION_FLUSH_NOSMT: 7196 case L1TF_MITIGATION_FULL: 7197 /* 7198 * Warn upon starting the first VM in a potentially 7199 * insecure environment. 
7200 */ 7201 if (sched_smt_active()) 7202 pr_warn_once(L1TF_MSG_SMT); 7203 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) 7204 pr_warn_once(L1TF_MSG_L1D); 7205 break; 7206 case L1TF_MITIGATION_FULL_FORCE: 7207 /* Flush is enforced */ 7208 break; 7209 } 7210 } 7211 return 0; 7212 } 7213 7214 static int __init vmx_check_processor_compat(void) 7215 { 7216 struct vmcs_config vmcs_conf; 7217 struct vmx_capability vmx_cap; 7218 7219 if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 7220 !this_cpu_has(X86_FEATURE_VMX)) { 7221 pr_err("kvm: VMX is disabled on CPU %d\n", smp_processor_id()); 7222 return -EIO; 7223 } 7224 7225 if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) 7226 return -EIO; 7227 if (nested) 7228 nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept); 7229 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { 7230 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", 7231 smp_processor_id()); 7232 return -EIO; 7233 } 7234 return 0; 7235 } 7236 7237 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) 7238 { 7239 u8 cache; 7240 7241 /* We wanted to honor guest CD/MTRR/PAT, but doing so could result in 7242 * memory aliases with conflicting memory types and sometimes MCEs. 7243 * We have to be careful as to what are honored and when. 7244 * 7245 * For MMIO, guest CD/MTRR are ignored. The EPT memory type is set to 7246 * UC. The effective memory type is UC or WC depending on guest PAT. 7247 * This was historically the source of MCEs and we want to be 7248 * conservative. 7249 * 7250 * When there is no need to deal with noncoherent DMA (e.g., no VT-d 7251 * or VT-d has snoop control), guest CD/MTRR/PAT are all ignored. The 7252 * EPT memory type is set to WB. The effective memory type is forced 7253 * WB. 7254 * 7255 * Otherwise, we trust guest. Guest CD/MTRR/PAT are all honored. The 7256 * EPT memory type is used to emulate guest CD/MTRR. 7257 */ 7258 7259 if (is_mmio) 7260 return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT; 7261 7262 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) 7263 return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT; 7264 7265 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { 7266 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 7267 cache = MTRR_TYPE_WRBACK; 7268 else 7269 cache = MTRR_TYPE_UNCACHABLE; 7270 7271 return (cache << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT; 7272 } 7273 7274 return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT; 7275 } 7276 7277 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl) 7278 { 7279 /* 7280 * These bits in the secondary execution controls field 7281 * are dynamic, the others are mostly based on the hypervisor 7282 * architecture and the guest's CPUID. Do not touch the 7283 * dynamic bits. 7284 */ 7285 u32 mask = 7286 SECONDARY_EXEC_SHADOW_VMCS | 7287 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 7288 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 7289 SECONDARY_EXEC_DESC; 7290 7291 u32 cur_ctl = secondary_exec_controls_get(vmx); 7292 7293 secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask)); 7294 } 7295 7296 /* 7297 * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits 7298 * (indicating "allowed-1") if they are supported in the guest's CPUID. 
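 *
 * Illustrative expansion (uses the cr4_fixed1_update() helper macro defined
 * in the function below): for SMEP the generated check is roughly
 *
 *	entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
 *	if (entry && (entry->ebx & feature_bit(SMEP)))
 *		vmx->nested.msrs.cr4_fixed1 |= X86_CR4_SMEP;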
7299 */ 7300 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) 7301 { 7302 struct vcpu_vmx *vmx = to_vmx(vcpu); 7303 struct kvm_cpuid_entry2 *entry; 7304 7305 vmx->nested.msrs.cr0_fixed1 = 0xffffffff; 7306 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; 7307 7308 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \ 7309 if (entry && (entry->_reg & (_cpuid_mask))) \ 7310 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ 7311 } while (0) 7312 7313 entry = kvm_find_cpuid_entry(vcpu, 0x1, 0); 7314 cr4_fixed1_update(X86_CR4_VME, edx, feature_bit(VME)); 7315 cr4_fixed1_update(X86_CR4_PVI, edx, feature_bit(VME)); 7316 cr4_fixed1_update(X86_CR4_TSD, edx, feature_bit(TSC)); 7317 cr4_fixed1_update(X86_CR4_DE, edx, feature_bit(DE)); 7318 cr4_fixed1_update(X86_CR4_PSE, edx, feature_bit(PSE)); 7319 cr4_fixed1_update(X86_CR4_PAE, edx, feature_bit(PAE)); 7320 cr4_fixed1_update(X86_CR4_MCE, edx, feature_bit(MCE)); 7321 cr4_fixed1_update(X86_CR4_PGE, edx, feature_bit(PGE)); 7322 cr4_fixed1_update(X86_CR4_OSFXSR, edx, feature_bit(FXSR)); 7323 cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM)); 7324 cr4_fixed1_update(X86_CR4_VMXE, ecx, feature_bit(VMX)); 7325 cr4_fixed1_update(X86_CR4_SMXE, ecx, feature_bit(SMX)); 7326 cr4_fixed1_update(X86_CR4_PCIDE, ecx, feature_bit(PCID)); 7327 cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, feature_bit(XSAVE)); 7328 7329 entry = kvm_find_cpuid_entry(vcpu, 0x7, 0); 7330 cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, feature_bit(FSGSBASE)); 7331 cr4_fixed1_update(X86_CR4_SMEP, ebx, feature_bit(SMEP)); 7332 cr4_fixed1_update(X86_CR4_SMAP, ebx, feature_bit(SMAP)); 7333 cr4_fixed1_update(X86_CR4_PKE, ecx, feature_bit(PKU)); 7334 cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP)); 7335 cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57)); 7336 7337 #undef cr4_fixed1_update 7338 } 7339 7340 static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu) 7341 { 7342 struct vcpu_vmx *vmx = to_vmx(vcpu); 7343 7344 if (kvm_mpx_supported()) { 7345 bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX); 7346 7347 if (mpx_enabled) { 7348 vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; 7349 vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; 7350 } else { 7351 vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS; 7352 vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS; 7353 } 7354 } 7355 } 7356 7357 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) 7358 { 7359 struct vcpu_vmx *vmx = to_vmx(vcpu); 7360 struct kvm_cpuid_entry2 *best = NULL; 7361 int i; 7362 7363 for (i = 0; i < PT_CPUID_LEAVES; i++) { 7364 best = kvm_find_cpuid_entry(vcpu, 0x14, i); 7365 if (!best) 7366 return; 7367 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax; 7368 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx; 7369 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx; 7370 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx; 7371 } 7372 7373 /* Get the number of configurable Address Ranges for filtering */ 7374 vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps, 7375 PT_CAP_num_address_ranges); 7376 7377 /* Initialize and clear the no dependency bits */ 7378 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS | 7379 RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC | 7380 RTIT_CTL_BRANCH_EN); 7381 7382 /* 7383 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise 7384 * will inject an #GP 7385 */ 7386 if (intel_pt_validate_cap(vmx->pt_desc.caps, 
PT_CAP_cr3_filtering)) 7387 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN; 7388 7389 /* 7390 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and 7391 * PSBFreq can be set 7392 */ 7393 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc)) 7394 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC | 7395 RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ); 7396 7397 /* 7398 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn and MTCFreq can be set 7399 */ 7400 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc)) 7401 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN | 7402 RTIT_CTL_MTC_RANGE); 7403 7404 /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */ 7405 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite)) 7406 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW | 7407 RTIT_CTL_PTW_EN); 7408 7409 /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */ 7410 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace)) 7411 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN; 7412 7413 /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */ 7414 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output)) 7415 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA; 7416 7417 /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */ 7418 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys)) 7419 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; 7420 7421 /* unmask address range configure area */ 7422 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) 7423 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); 7424 } 7425 7426 static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) 7427 { 7428 struct vcpu_vmx *vmx = to_vmx(vcpu); 7429 7430 /* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */ 7431 vcpu->arch.xsaves_enabled = false; 7432 7433 vmx_setup_uret_msrs(vmx); 7434 7435 if (cpu_has_secondary_exec_ctrls()) 7436 vmcs_set_secondary_exec_control(vmx, 7437 vmx_secondary_exec_control(vmx)); 7438 7439 if (nested_vmx_allowed(vcpu)) 7440 vmx->msr_ia32_feature_control_valid_bits |= 7441 FEAT_CTL_VMX_ENABLED_INSIDE_SMX | 7442 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; 7443 else 7444 vmx->msr_ia32_feature_control_valid_bits &= 7445 ~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX | 7446 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX); 7447 7448 if (nested_vmx_allowed(vcpu)) { 7449 nested_vmx_cr_fixed1_bits_update(vcpu); 7450 nested_vmx_entry_exit_ctls_update(vcpu); 7451 } 7452 7453 if (boot_cpu_has(X86_FEATURE_INTEL_PT) && 7454 guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT)) 7455 update_intel_pt_cfg(vcpu); 7456 7457 if (boot_cpu_has(X86_FEATURE_RTM)) { 7458 struct vmx_uret_msr *msr; 7459 msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL); 7460 if (msr) { 7461 bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM); 7462 vmx_set_guest_uret_msr(vmx, msr, enabled ? 
0 : TSX_CTRL_RTM_DISABLE); 7463 } 7464 } 7465 7466 if (kvm_cpu_cap_has(X86_FEATURE_XFD)) 7467 vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R, 7468 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)); 7469 7470 7471 set_cr4_guest_host_mask(vmx); 7472 7473 vmx_write_encls_bitmap(vcpu, NULL); 7474 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX)) 7475 vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED; 7476 else 7477 vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED; 7478 7479 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC)) 7480 vmx->msr_ia32_feature_control_valid_bits |= 7481 FEAT_CTL_SGX_LC_ENABLED; 7482 else 7483 vmx->msr_ia32_feature_control_valid_bits &= 7484 ~FEAT_CTL_SGX_LC_ENABLED; 7485 7486 /* Refresh #PF interception to account for MAXPHYADDR changes. */ 7487 vmx_update_exception_bitmap(vcpu); 7488 } 7489 7490 static __init void vmx_set_cpu_caps(void) 7491 { 7492 kvm_set_cpu_caps(); 7493 7494 /* CPUID 0x1 */ 7495 if (nested) 7496 kvm_cpu_cap_set(X86_FEATURE_VMX); 7497 7498 /* CPUID 0x7 */ 7499 if (kvm_mpx_supported()) 7500 kvm_cpu_cap_check_and_set(X86_FEATURE_MPX); 7501 if (!cpu_has_vmx_invpcid()) 7502 kvm_cpu_cap_clear(X86_FEATURE_INVPCID); 7503 if (vmx_pt_mode_is_host_guest()) 7504 kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT); 7505 7506 if (!enable_sgx) { 7507 kvm_cpu_cap_clear(X86_FEATURE_SGX); 7508 kvm_cpu_cap_clear(X86_FEATURE_SGX_LC); 7509 kvm_cpu_cap_clear(X86_FEATURE_SGX1); 7510 kvm_cpu_cap_clear(X86_FEATURE_SGX2); 7511 } 7512 7513 if (vmx_umip_emulated()) 7514 kvm_cpu_cap_set(X86_FEATURE_UMIP); 7515 7516 /* CPUID 0xD.1 */ 7517 supported_xss = 0; 7518 if (!cpu_has_vmx_xsaves()) 7519 kvm_cpu_cap_clear(X86_FEATURE_XSAVES); 7520 7521 /* CPUID 0x80000001 and 0x7 (RDPID) */ 7522 if (!cpu_has_vmx_rdtscp()) { 7523 kvm_cpu_cap_clear(X86_FEATURE_RDTSCP); 7524 kvm_cpu_cap_clear(X86_FEATURE_RDPID); 7525 } 7526 7527 if (cpu_has_vmx_waitpkg()) 7528 kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); 7529 } 7530 7531 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) 7532 { 7533 to_vmx(vcpu)->req_immediate_exit = true; 7534 } 7535 7536 static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, 7537 struct x86_instruction_info *info) 7538 { 7539 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 7540 unsigned short port; 7541 bool intercept; 7542 int size; 7543 7544 if (info->intercept == x86_intercept_in || 7545 info->intercept == x86_intercept_ins) { 7546 port = info->src_val; 7547 size = info->dst_bytes; 7548 } else { 7549 port = info->dst_val; 7550 size = info->src_bytes; 7551 } 7552 7553 /* 7554 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction 7555 * VM-exits depend on the 'unconditional IO exiting' VM-execution 7556 * control. 7557 * 7558 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps. 7559 */ 7560 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 7561 intercept = nested_cpu_has(vmcs12, 7562 CPU_BASED_UNCOND_IO_EXITING); 7563 else 7564 intercept = nested_vmx_check_io_bitmaps(vcpu, port, size); 7565 7566 /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ 7567 return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; 7568 } 7569 7570 static int vmx_check_intercept(struct kvm_vcpu *vcpu, 7571 struct x86_instruction_info *info, 7572 enum x86_intercept_stage stage, 7573 struct x86_exception *exception) 7574 { 7575 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 7576 7577 switch (info->intercept) { 7578 /* 7579 * RDPID causes #UD if disabled through secondary execution controls. 
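 * (Here "secondary execution controls" means SECONDARY_EXEC_ENABLE_RDTSCP,
 * which is what the case below checks: RDPID reads IA32_TSC_AUX and shares
 * that enable bit with RDTSCP.)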
7580 * Because it is marked as EmulateOnUD, we need to intercept it here. 7581 * Note, RDPID is hidden behind ENABLE_RDTSCP. 7582 */ 7583 case x86_intercept_rdpid: 7584 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) { 7585 exception->vector = UD_VECTOR; 7586 exception->error_code_valid = false; 7587 return X86EMUL_PROPAGATE_FAULT; 7588 } 7589 break; 7590 7591 case x86_intercept_in: 7592 case x86_intercept_ins: 7593 case x86_intercept_out: 7594 case x86_intercept_outs: 7595 return vmx_check_intercept_io(vcpu, info); 7596 7597 case x86_intercept_lgdt: 7598 case x86_intercept_lidt: 7599 case x86_intercept_lldt: 7600 case x86_intercept_ltr: 7601 case x86_intercept_sgdt: 7602 case x86_intercept_sidt: 7603 case x86_intercept_sldt: 7604 case x86_intercept_str: 7605 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC)) 7606 return X86EMUL_CONTINUE; 7607 7608 /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ 7609 break; 7610 7611 /* TODO: check more intercepts... */ 7612 default: 7613 break; 7614 } 7615 7616 return X86EMUL_UNHANDLEABLE; 7617 } 7618 7619 #ifdef CONFIG_X86_64 7620 /* (a << shift) / divisor, return 1 if overflow otherwise 0 */ 7621 static inline int u64_shl_div_u64(u64 a, unsigned int shift, 7622 u64 divisor, u64 *result) 7623 { 7624 u64 low = a << shift, high = a >> (64 - shift); 7625 7626 /* To avoid the overflow on divq */ 7627 if (high >= divisor) 7628 return 1; 7629 7630 /* Low hold the result, high hold rem which is discarded */ 7631 asm("divq %2\n\t" : "=a" (low), "=d" (high) : 7632 "rm" (divisor), "0" (low), "1" (high)); 7633 *result = low; 7634 7635 return 0; 7636 } 7637 7638 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc, 7639 bool *expired) 7640 { 7641 struct vcpu_vmx *vmx; 7642 u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles; 7643 struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer; 7644 7645 vmx = to_vmx(vcpu); 7646 tscl = rdtsc(); 7647 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); 7648 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; 7649 lapic_timer_advance_cycles = nsec_to_cycles(vcpu, 7650 ktimer->timer_advance_ns); 7651 7652 if (delta_tsc > lapic_timer_advance_cycles) 7653 delta_tsc -= lapic_timer_advance_cycles; 7654 else 7655 delta_tsc = 0; 7656 7657 /* Convert to host delta tsc if tsc scaling is enabled */ 7658 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && 7659 delta_tsc && u64_shl_div_u64(delta_tsc, 7660 kvm_tsc_scaling_ratio_frac_bits, 7661 vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc)) 7662 return -ERANGE; 7663 7664 /* 7665 * If the delta tsc can't fit in the 32 bit after the multi shift, 7666 * we can't use the preemption timer. 7667 * It's possible that it fits on later vmentries, but checking 7668 * on every vmentry is costly so we just use an hrtimer. 
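 *
 * Worked example (illustrative numbers): with a preemption timer rate of 5
 * the deadline must fit in 32 + 5 = 37 bits of host TSC cycles, i.e.
 * roughly 2^37 / 3e9 ~= 45 seconds on a 3 GHz TSC. Note that when TSC
 * scaling is active, delta_tsc has already been converted to host cycles
 * above via u64_shl_div_u64(delta_tsc, 48, l1_tsc_scaling_ratio, ...).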
7669 */ 7670 if (delta_tsc >> (cpu_preemption_timer_multi + 32)) 7671 return -ERANGE; 7672 7673 vmx->hv_deadline_tsc = tscl + delta_tsc; 7674 *expired = !delta_tsc; 7675 return 0; 7676 } 7677 7678 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) 7679 { 7680 to_vmx(vcpu)->hv_deadline_tsc = -1; 7681 } 7682 #endif 7683 7684 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) 7685 { 7686 if (!kvm_pause_in_guest(vcpu->kvm)) 7687 shrink_ple_window(vcpu); 7688 } 7689 7690 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu) 7691 { 7692 struct vcpu_vmx *vmx = to_vmx(vcpu); 7693 7694 if (is_guest_mode(vcpu)) { 7695 vmx->nested.update_vmcs01_cpu_dirty_logging = true; 7696 return; 7697 } 7698 7699 /* 7700 * Note, cpu_dirty_logging_count can be changed concurrent with this 7701 * code, but in that case another update request will be made and so 7702 * the guest will never run with a stale PML value. 7703 */ 7704 if (vcpu->kvm->arch.cpu_dirty_logging_count) 7705 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML); 7706 else 7707 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML); 7708 } 7709 7710 static void vmx_setup_mce(struct kvm_vcpu *vcpu) 7711 { 7712 if (vcpu->arch.mcg_cap & MCG_LMCE_P) 7713 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= 7714 FEAT_CTL_LMCE_ENABLED; 7715 else 7716 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= 7717 ~FEAT_CTL_LMCE_ENABLED; 7718 } 7719 7720 static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) 7721 { 7722 /* we need a nested vmexit to enter SMM, postpone if run is pending */ 7723 if (to_vmx(vcpu)->nested.nested_run_pending) 7724 return -EBUSY; 7725 return !is_smm(vcpu); 7726 } 7727 7728 static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate) 7729 { 7730 struct vcpu_vmx *vmx = to_vmx(vcpu); 7731 7732 vmx->nested.smm.guest_mode = is_guest_mode(vcpu); 7733 if (vmx->nested.smm.guest_mode) 7734 nested_vmx_vmexit(vcpu, -1, 0, 0); 7735 7736 vmx->nested.smm.vmxon = vmx->nested.vmxon; 7737 vmx->nested.vmxon = false; 7738 vmx_clear_hlt(vcpu); 7739 return 0; 7740 } 7741 7742 static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) 7743 { 7744 struct vcpu_vmx *vmx = to_vmx(vcpu); 7745 int ret; 7746 7747 if (vmx->nested.smm.vmxon) { 7748 vmx->nested.vmxon = true; 7749 vmx->nested.smm.vmxon = false; 7750 } 7751 7752 if (vmx->nested.smm.guest_mode) { 7753 ret = nested_vmx_enter_non_root_mode(vcpu, false); 7754 if (ret) 7755 return ret; 7756 7757 vmx->nested.nested_run_pending = 1; 7758 vmx->nested.smm.guest_mode = false; 7759 } 7760 return 0; 7761 } 7762 7763 static void vmx_enable_smi_window(struct kvm_vcpu *vcpu) 7764 { 7765 /* RSM will cause a vmexit anyway. 
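 * (Hedged note: this callback is intentionally a no-op on VMX; SMM is
 * emulated by KVM, so the emulated RSM necessarily runs in the host and
 * there is no hardware "SMI window" exiting to arm here.)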
*/ 7766 } 7767 7768 static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu) 7769 { 7770 return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu); 7771 } 7772 7773 static void vmx_migrate_timers(struct kvm_vcpu *vcpu) 7774 { 7775 if (is_guest_mode(vcpu)) { 7776 struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer; 7777 7778 if (hrtimer_try_to_cancel(timer) == 1) 7779 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); 7780 } 7781 } 7782 7783 static void vmx_hardware_unsetup(void) 7784 { 7785 kvm_set_posted_intr_wakeup_handler(NULL); 7786 7787 if (nested) 7788 nested_vmx_hardware_unsetup(); 7789 7790 free_kvm_area(); 7791 } 7792 7793 static bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason) 7794 { 7795 ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) | 7796 BIT(APICV_INHIBIT_REASON_ABSENT) | 7797 BIT(APICV_INHIBIT_REASON_HYPERV) | 7798 BIT(APICV_INHIBIT_REASON_BLOCKIRQ) | 7799 BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) | 7800 BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED); 7801 7802 return supported & BIT(reason); 7803 } 7804 7805 static struct kvm_x86_ops vmx_x86_ops __initdata = { 7806 .name = "kvm_intel", 7807 7808 .hardware_unsetup = vmx_hardware_unsetup, 7809 7810 .hardware_enable = vmx_hardware_enable, 7811 .hardware_disable = vmx_hardware_disable, 7812 .has_emulated_msr = vmx_has_emulated_msr, 7813 7814 .vm_size = sizeof(struct kvm_vmx), 7815 .vm_init = vmx_vm_init, 7816 7817 .vcpu_create = vmx_vcpu_create, 7818 .vcpu_free = vmx_vcpu_free, 7819 .vcpu_reset = vmx_vcpu_reset, 7820 7821 .prepare_switch_to_guest = vmx_prepare_switch_to_guest, 7822 .vcpu_load = vmx_vcpu_load, 7823 .vcpu_put = vmx_vcpu_put, 7824 7825 .update_exception_bitmap = vmx_update_exception_bitmap, 7826 .get_msr_feature = vmx_get_msr_feature, 7827 .get_msr = vmx_get_msr, 7828 .set_msr = vmx_set_msr, 7829 .get_segment_base = vmx_get_segment_base, 7830 .get_segment = vmx_get_segment, 7831 .set_segment = vmx_set_segment, 7832 .get_cpl = vmx_get_cpl, 7833 .get_cs_db_l_bits = vmx_get_cs_db_l_bits, 7834 .set_cr0 = vmx_set_cr0, 7835 .is_valid_cr4 = vmx_is_valid_cr4, 7836 .set_cr4 = vmx_set_cr4, 7837 .set_efer = vmx_set_efer, 7838 .get_idt = vmx_get_idt, 7839 .set_idt = vmx_set_idt, 7840 .get_gdt = vmx_get_gdt, 7841 .set_gdt = vmx_set_gdt, 7842 .set_dr7 = vmx_set_dr7, 7843 .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, 7844 .cache_reg = vmx_cache_reg, 7845 .get_rflags = vmx_get_rflags, 7846 .set_rflags = vmx_set_rflags, 7847 .get_if_flag = vmx_get_if_flag, 7848 7849 .flush_tlb_all = vmx_flush_tlb_all, 7850 .flush_tlb_current = vmx_flush_tlb_current, 7851 .flush_tlb_gva = vmx_flush_tlb_gva, 7852 .flush_tlb_guest = vmx_flush_tlb_guest, 7853 7854 .vcpu_pre_run = vmx_vcpu_pre_run, 7855 .vcpu_run = vmx_vcpu_run, 7856 .handle_exit = vmx_handle_exit, 7857 .skip_emulated_instruction = vmx_skip_emulated_instruction, 7858 .update_emulated_instruction = vmx_update_emulated_instruction, 7859 .set_interrupt_shadow = vmx_set_interrupt_shadow, 7860 .get_interrupt_shadow = vmx_get_interrupt_shadow, 7861 .patch_hypercall = vmx_patch_hypercall, 7862 .inject_irq = vmx_inject_irq, 7863 .inject_nmi = vmx_inject_nmi, 7864 .queue_exception = vmx_queue_exception, 7865 .cancel_injection = vmx_cancel_injection, 7866 .interrupt_allowed = vmx_interrupt_allowed, 7867 .nmi_allowed = vmx_nmi_allowed, 7868 .get_nmi_mask = vmx_get_nmi_mask, 7869 .set_nmi_mask = vmx_set_nmi_mask, 7870 .enable_nmi_window = vmx_enable_nmi_window, 7871 .enable_irq_window = vmx_enable_irq_window, 7872 .update_cr8_intercept = 
vmx_update_cr8_intercept, 7873 .set_virtual_apic_mode = vmx_set_virtual_apic_mode, 7874 .set_apic_access_page_addr = vmx_set_apic_access_page_addr, 7875 .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, 7876 .load_eoi_exitmap = vmx_load_eoi_exitmap, 7877 .apicv_post_state_restore = vmx_apicv_post_state_restore, 7878 .check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons, 7879 .hwapic_irr_update = vmx_hwapic_irr_update, 7880 .hwapic_isr_update = vmx_hwapic_isr_update, 7881 .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, 7882 .sync_pir_to_irr = vmx_sync_pir_to_irr, 7883 .deliver_interrupt = vmx_deliver_interrupt, 7884 .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt, 7885 7886 .set_tss_addr = vmx_set_tss_addr, 7887 .set_identity_map_addr = vmx_set_identity_map_addr, 7888 .get_mt_mask = vmx_get_mt_mask, 7889 7890 .get_exit_info = vmx_get_exit_info, 7891 7892 .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid, 7893 7894 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, 7895 7896 .get_l2_tsc_offset = vmx_get_l2_tsc_offset, 7897 .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier, 7898 .write_tsc_offset = vmx_write_tsc_offset, 7899 .write_tsc_multiplier = vmx_write_tsc_multiplier, 7900 7901 .load_mmu_pgd = vmx_load_mmu_pgd, 7902 7903 .check_intercept = vmx_check_intercept, 7904 .handle_exit_irqoff = vmx_handle_exit_irqoff, 7905 7906 .request_immediate_exit = vmx_request_immediate_exit, 7907 7908 .sched_in = vmx_sched_in, 7909 7910 .cpu_dirty_log_size = PML_ENTITY_NUM, 7911 .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging, 7912 7913 .nested_ops = &vmx_nested_ops, 7914 7915 .pi_update_irte = vmx_pi_update_irte, 7916 .pi_start_assignment = vmx_pi_start_assignment, 7917 7918 #ifdef CONFIG_X86_64 7919 .set_hv_timer = vmx_set_hv_timer, 7920 .cancel_hv_timer = vmx_cancel_hv_timer, 7921 #endif 7922 7923 .setup_mce = vmx_setup_mce, 7924 7925 .smi_allowed = vmx_smi_allowed, 7926 .enter_smm = vmx_enter_smm, 7927 .leave_smm = vmx_leave_smm, 7928 .enable_smi_window = vmx_enable_smi_window, 7929 7930 .can_emulate_instruction = vmx_can_emulate_instruction, 7931 .apic_init_signal_blocked = vmx_apic_init_signal_blocked, 7932 .migrate_timers = vmx_migrate_timers, 7933 7934 .msr_filter_changed = vmx_msr_filter_changed, 7935 .complete_emulated_msr = kvm_complete_insn_gp, 7936 7937 .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector, 7938 }; 7939 7940 static unsigned int vmx_handle_intel_pt_intr(void) 7941 { 7942 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 7943 7944 /* '0' on failure so that the !PT case can use a RET0 static call. */ 7945 if (!vcpu || !kvm_handling_nmi_from_guest(vcpu)) 7946 return 0; 7947 7948 kvm_make_request(KVM_REQ_PMI, vcpu); 7949 __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT, 7950 (unsigned long *)&vcpu->arch.pmu.global_status); 7951 return 1; 7952 } 7953 7954 static __init void vmx_setup_user_return_msrs(void) 7955 { 7956 7957 /* 7958 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm 7959 * will emulate SYSCALL in legacy mode if the vendor string in guest 7960 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To 7961 * support this emulation, MSR_STAR is included in the list for i386, 7962 * but is never loaded into hardware. MSR_CSTAR is also never loaded 7963 * into hardware and is here purely for emulation purposes. 
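 *
 * For illustration (derived from the list just below): 64-bit builds end up
 * with
 *   { MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_EFER, MSR_TSC_AUX,
 *     MSR_STAR, MSR_IA32_TSX_CTRL }
 * while i386 builds keep only the last four entries.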
7964 */ 7965 const u32 vmx_uret_msrs_list[] = { 7966 #ifdef CONFIG_X86_64 7967 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, 7968 #endif 7969 MSR_EFER, MSR_TSC_AUX, MSR_STAR, 7970 MSR_IA32_TSX_CTRL, 7971 }; 7972 int i; 7973 7974 BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS); 7975 7976 for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) 7977 kvm_add_user_return_msr(vmx_uret_msrs_list[i]); 7978 } 7979 7980 static void __init vmx_setup_me_spte_mask(void) 7981 { 7982 u64 me_mask = 0; 7983 7984 /* 7985 * kvm_get_shadow_phys_bits() returns shadow_phys_bits. Use 7986 * the former to avoid exposing shadow_phys_bits. 7987 * 7988 * On pre-MKTME system, boot_cpu_data.x86_phys_bits equals to 7989 * shadow_phys_bits. On MKTME and/or TDX capable systems, 7990 * boot_cpu_data.x86_phys_bits holds the actual physical address 7991 * w/o the KeyID bits, and shadow_phys_bits equals to MAXPHYADDR 7992 * reported by CPUID. Those bits between are KeyID bits. 7993 */ 7994 if (boot_cpu_data.x86_phys_bits != kvm_get_shadow_phys_bits()) 7995 me_mask = rsvd_bits(boot_cpu_data.x86_phys_bits, 7996 kvm_get_shadow_phys_bits() - 1); 7997 /* 7998 * Unlike SME, host kernel doesn't support setting up any 7999 * MKTME KeyID on Intel platforms. No memory encryption 8000 * bits should be included into the SPTE. 8001 */ 8002 kvm_mmu_set_me_spte_mask(0, me_mask); 8003 } 8004 8005 static struct kvm_x86_init_ops vmx_init_ops __initdata; 8006 8007 static __init int hardware_setup(void) 8008 { 8009 unsigned long host_bndcfgs; 8010 struct desc_ptr dt; 8011 int r; 8012 8013 store_idt(&dt); 8014 host_idt_base = dt.address; 8015 8016 vmx_setup_user_return_msrs(); 8017 8018 if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0) 8019 return -EIO; 8020 8021 if (boot_cpu_has(X86_FEATURE_NX)) 8022 kvm_enable_efer_bits(EFER_NX); 8023 8024 if (boot_cpu_has(X86_FEATURE_MPX)) { 8025 rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs); 8026 WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost"); 8027 } 8028 8029 if (!cpu_has_vmx_mpx()) 8030 supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | 8031 XFEATURE_MASK_BNDCSR); 8032 8033 if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || 8034 !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) 8035 enable_vpid = 0; 8036 8037 if (!cpu_has_vmx_ept() || 8038 !cpu_has_vmx_ept_4levels() || 8039 !cpu_has_vmx_ept_mt_wb() || 8040 !cpu_has_vmx_invept_global()) 8041 enable_ept = 0; 8042 8043 /* NX support is required for shadow paging. */ 8044 if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) { 8045 pr_err_ratelimited("kvm: NX (Execute Disable) not supported\n"); 8046 return -EOPNOTSUPP; 8047 } 8048 8049 if (!cpu_has_vmx_ept_ad_bits() || !enable_ept) 8050 enable_ept_ad_bits = 0; 8051 8052 if (!cpu_has_vmx_unrestricted_guest() || !enable_ept) 8053 enable_unrestricted_guest = 0; 8054 8055 if (!cpu_has_vmx_flexpriority()) 8056 flexpriority_enabled = 0; 8057 8058 if (!cpu_has_virtual_nmis()) 8059 enable_vnmi = 0; 8060 8061 /* 8062 * set_apic_access_page_addr() is used to reload apic access 8063 * page upon invalidation. No need to do anything if not 8064 * using the APIC_ACCESS_ADDR VMCS field. 
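 *
 * (Illustrative, per capabilities.h: flexpriority is reported only when
 * both TPR shadowing and "virtualize APIC accesses" are available, roughly
 *   cpu_has_vmx_tpr_shadow() && cpu_has_vmx_virtualize_apic_accesses()
 * so without it APIC_ACCESS_ADDR is never programmed and the callback can
 * be dropped.)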
8065 */ 8066 if (!flexpriority_enabled) 8067 vmx_x86_ops.set_apic_access_page_addr = NULL; 8068 8069 if (!cpu_has_vmx_tpr_shadow()) 8070 vmx_x86_ops.update_cr8_intercept = NULL; 8071 8072 #if IS_ENABLED(CONFIG_HYPERV) 8073 if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH 8074 && enable_ept) { 8075 vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb; 8076 vmx_x86_ops.tlb_remote_flush_with_range = 8077 hv_remote_flush_tlb_with_range; 8078 } 8079 #endif 8080 8081 if (!cpu_has_vmx_ple()) { 8082 ple_gap = 0; 8083 ple_window = 0; 8084 ple_window_grow = 0; 8085 ple_window_max = 0; 8086 ple_window_shrink = 0; 8087 } 8088 8089 if (!cpu_has_vmx_apicv()) 8090 enable_apicv = 0; 8091 if (!enable_apicv) 8092 vmx_x86_ops.sync_pir_to_irr = NULL; 8093 8094 if (cpu_has_vmx_tsc_scaling()) 8095 kvm_has_tsc_control = true; 8096 8097 kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; 8098 kvm_tsc_scaling_ratio_frac_bits = 48; 8099 kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection(); 8100 8101 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ 8102 8103 if (enable_ept) 8104 kvm_mmu_set_ept_masks(enable_ept_ad_bits, 8105 cpu_has_vmx_ept_execute_only()); 8106 8107 /* 8108 * Setup shadow_me_value/shadow_me_mask to include MKTME KeyID 8109 * bits to shadow_zero_check. 8110 */ 8111 vmx_setup_me_spte_mask(); 8112 8113 kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(), 8114 ept_caps_to_lpage_level(vmx_capability.ept)); 8115 8116 /* 8117 * Only enable PML when hardware supports PML feature, and both EPT 8118 * and EPT A/D bit features are enabled -- PML depends on them to work. 8119 */ 8120 if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) 8121 enable_pml = 0; 8122 8123 if (!enable_pml) 8124 vmx_x86_ops.cpu_dirty_log_size = 0; 8125 8126 if (!cpu_has_vmx_preemption_timer()) 8127 enable_preemption_timer = false; 8128 8129 if (enable_preemption_timer) { 8130 u64 use_timer_freq = 5000ULL * 1000 * 1000; 8131 u64 vmx_msr; 8132 8133 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); 8134 cpu_preemption_timer_multi = 8135 vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; 8136 8137 if (tsc_khz) 8138 use_timer_freq = (u64)tsc_khz * 1000; 8139 use_timer_freq >>= cpu_preemption_timer_multi; 8140 8141 /* 8142 * KVM "disables" the preemption timer by setting it to its max 8143 * value. Don't use the timer if it might cause spurious exits 8144 * at a rate faster than 0.1 Hz (of uninterrupted guest time). 
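 *
 * Worked example (illustrative numbers): a 3 GHz TSC with a timer rate of 5
 * gives use_timer_freq = 3e9 >> 5 ~= 93.75 MHz, so the all-ones value
 * expires after ~2^32 / 93.75e6 ~= 45.8 s and the timer stays enabled; only
 * above ~429 MHz (0xffffffff / 10) would the maximum period drop below
 * 10 seconds.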
8145 */ 8146 if (use_timer_freq > 0xffffffffu / 10) 8147 enable_preemption_timer = false; 8148 } 8149 8150 if (!enable_preemption_timer) { 8151 vmx_x86_ops.set_hv_timer = NULL; 8152 vmx_x86_ops.cancel_hv_timer = NULL; 8153 vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit; 8154 } 8155 8156 kvm_mce_cap_supported |= MCG_LMCE_P; 8157 8158 if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST) 8159 return -EINVAL; 8160 if (!enable_ept || !cpu_has_vmx_intel_pt()) 8161 pt_mode = PT_MODE_SYSTEM; 8162 if (pt_mode == PT_MODE_HOST_GUEST) 8163 vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr; 8164 else 8165 vmx_init_ops.handle_intel_pt_intr = NULL; 8166 8167 setup_default_sgx_lepubkeyhash(); 8168 8169 if (nested) { 8170 nested_vmx_setup_ctls_msrs(&vmcs_config.nested, 8171 vmx_capability.ept); 8172 8173 r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers); 8174 if (r) 8175 return r; 8176 } 8177 8178 vmx_set_cpu_caps(); 8179 8180 r = alloc_kvm_area(); 8181 if (r && nested) 8182 nested_vmx_hardware_unsetup(); 8183 8184 kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler); 8185 8186 return r; 8187 } 8188 8189 static struct kvm_x86_init_ops vmx_init_ops __initdata = { 8190 .cpu_has_kvm_support = cpu_has_kvm_support, 8191 .disabled_by_bios = vmx_disabled_by_bios, 8192 .check_processor_compatibility = vmx_check_processor_compat, 8193 .hardware_setup = hardware_setup, 8194 .handle_intel_pt_intr = NULL, 8195 8196 .runtime_ops = &vmx_x86_ops, 8197 .pmu_ops = &intel_pmu_ops, 8198 }; 8199 8200 static void vmx_cleanup_l1d_flush(void) 8201 { 8202 if (vmx_l1d_flush_pages) { 8203 free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); 8204 vmx_l1d_flush_pages = NULL; 8205 } 8206 /* Restore state so sysfs ignores VMX */ 8207 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 8208 } 8209 8210 static void vmx_exit(void) 8211 { 8212 #ifdef CONFIG_KEXEC_CORE 8213 RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); 8214 synchronize_rcu(); 8215 #endif 8216 8217 kvm_exit(); 8218 8219 #if IS_ENABLED(CONFIG_HYPERV) 8220 if (static_branch_unlikely(&enable_evmcs)) { 8221 int cpu; 8222 struct hv_vp_assist_page *vp_ap; 8223 /* 8224 * Reset everything to support using non-enlightened VMCS 8225 * access later (e.g. when we reload the module with 8226 * enlightened_vmcs=0) 8227 */ 8228 for_each_online_cpu(cpu) { 8229 vp_ap = hv_get_vp_assist_page(cpu); 8230 8231 if (!vp_ap) 8232 continue; 8233 8234 vp_ap->nested_control.features.directhypercall = 0; 8235 vp_ap->current_nested_vmcs = 0; 8236 vp_ap->enlighten_vmentry = 0; 8237 } 8238 8239 static_branch_disable(&enable_evmcs); 8240 } 8241 #endif 8242 vmx_cleanup_l1d_flush(); 8243 8244 allow_smaller_maxphyaddr = false; 8245 } 8246 module_exit(vmx_exit); 8247 8248 static int __init vmx_init(void) 8249 { 8250 int r, cpu; 8251 8252 #if IS_ENABLED(CONFIG_HYPERV) 8253 /* 8254 * Enlightened VMCS usage should be recommended and the host needs 8255 * to support eVMCS v1 or above. We can also disable eVMCS support 8256 * with module parameter. 
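 *
 * Summarized (per the checks below), eVMCS is used only when:
 *   1) the enlightened_vmcs module parameter is set,
 *   2) Hyper-V recommends it (HV_X64_ENLIGHTENED_VMCS_RECOMMENDED),
 *   3) the advertised eVMCS version is >= KVM_EVMCS_VERSION, and
 *   4) every online CPU exposes a VP assist page.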
8257 */ 8258 if (enlightened_vmcs && 8259 ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED && 8260 (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >= 8261 KVM_EVMCS_VERSION) { 8262 8263 /* Check that we have assist pages on all online CPUs */ 8264 for_each_online_cpu(cpu) { 8265 if (!hv_get_vp_assist_page(cpu)) { 8266 enlightened_vmcs = false; 8267 break; 8268 } 8269 } 8270 8271 if (enlightened_vmcs) { 8272 pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n"); 8273 static_branch_enable(&enable_evmcs); 8274 } 8275 8276 if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) 8277 vmx_x86_ops.enable_direct_tlbflush 8278 = hv_enable_direct_tlbflush; 8279 8280 } else { 8281 enlightened_vmcs = false; 8282 } 8283 #endif 8284 8285 r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx), 8286 __alignof__(struct vcpu_vmx), THIS_MODULE); 8287 if (r) 8288 return r; 8289 8290 /* 8291 * Must be called after kvm_init() so enable_ept is properly set 8292 * up. Hand the parameter mitigation value in which was stored in 8293 * the pre module init parser. If no parameter was given, it will 8294 * contain 'auto' which will be turned into the default 'cond' 8295 * mitigation mode. 8296 */ 8297 r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); 8298 if (r) { 8299 vmx_exit(); 8300 return r; 8301 } 8302 8303 vmx_setup_fb_clear_ctrl(); 8304 8305 for_each_possible_cpu(cpu) { 8306 INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); 8307 8308 pi_init_cpu(cpu); 8309 } 8310 8311 #ifdef CONFIG_KEXEC_CORE 8312 rcu_assign_pointer(crash_vmclear_loaded_vmcss, 8313 crash_vmclear_local_loaded_vmcss); 8314 #endif 8315 vmx_check_vmcs12_offsets(); 8316 8317 /* 8318 * Shadow paging doesn't have a (further) performance penalty 8319 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it 8320 * by default 8321 */ 8322 if (!enable_ept) 8323 allow_smaller_maxphyaddr = true; 8324 8325 return 0; 8326 } 8327 module_init(vmx_init); 8328
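/*
 * Usage sketch (illustrative, not part of this file): a typical module load
 * exercising the parameters handled above, e.g. disabling eVMCS and selecting
 * the conditional L1D flush that vmx_setup_l1d_flush() applies:
 *
 *	modprobe kvm_intel enlightened_vmcs=0 vmentry_l1d_flush=cond
 *
 * vmentry_l1d_flush is parsed by the pre-module-init parser mentioned above
 * and only takes effect once vmx_init() has run.
 */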