// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/mm.h>
#include <linux/objtool.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/trace_events.h>
#include <linux/entry-kvm.h>

#include <asm/apic.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/idtentry.h>
#include <asm/io.h>
#include <asm/irq_remapping.h>
#include <asm/kexec.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/mshyperv.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
#include <asm/virtext.h>
#include <asm/vmx.h>

#include "capabilities.h"
#include "cpuid.h"
#include "evmcs.h"
#include "hyperv.h"
#include "kvm_onhyperv.h"
#include "irq.h"
#include "kvm_cache_regs.h"
#include "lapic.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "sgx.h"
#include "trace.h"
#include "vmcs.h"
#include "vmcs12.h"
#include "vmx.h"
#include "x86.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
#endif

bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly enable_vnmi = 1;
module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);

bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);

module_param(enable_apicv, bool, S_IRUGO);
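/*
 * Illustrative note (not part of the original source): all of the knobs above
 * and below are ordinary module parameters, so the usual way to override the
 * defaults is on the command line when loading the module, e.g.:
 *
 *   modprobe kvm_intel ept=1 unrestricted_guest=1 nested=0
 *
 * Read-only parameters (0444/S_IRUGO) can still be inspected at runtime under
 * /sys/module/kvm_intel/parameters/.
 */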
/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and be a hypervisor for their own guests. If nested=0, guests may
 * not use VMX instructions.
 */
static bool __read_mostly nested = 1;
module_param(nested, bool, S_IRUGO);

bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);

static bool __read_mostly dump_invalid_vmcs = 0;
module_param(dump_invalid_vmcs, bool, 0644);

#define MSR_BITMAP_MODE_X2APIC		1
#define MSR_BITMAP_MODE_X2APIC_APICV	2

#define KVM_VMX_TSC_MULTIPLIER_MAX	0xffffffffffffffffULL

/* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
static int __read_mostly cpu_preemption_timer_multi;
static bool __read_mostly enable_preemption_timer = 1;
#ifdef CONFIG_X86_64
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif

extern bool __read_mostly allow_smaller_maxphyaddr;
module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);

#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON				\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)

#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
	RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
	RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
	RTIT_STATUS_BYTECNT))

/*
 * List of MSRs that can be directly passed to the guest.
 * In addition to these x2apic and PT MSRs are handled specially.
 */
static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
	MSR_IA32_SPEC_CTRL,
	MSR_IA32_PRED_CMD,
	MSR_IA32_TSC,
#ifdef CONFIG_X86_64
	MSR_FS_BASE,
	MSR_GS_BASE,
	MSR_KERNEL_GS_BASE,
	MSR_IA32_XFD,
	MSR_IA32_XFD_ERR,
#endif
	MSR_IA32_SYSENTER_CS,
	MSR_IA32_SYSENTER_ESP,
	MSR_IA32_SYSENTER_EIP,
	MSR_CORE_C1_RES,
	MSR_CORE_C3_RESIDENCY,
	MSR_CORE_C6_RESIDENCY,
	MSR_CORE_C7_RESIDENCY,
};

/*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled. According to test, this time is usually smaller than
 *             128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held
 *             for less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC,
 * refer to SDM volume 3b section 21.6.13 & 22.1.3.
 */
static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
module_param(ple_gap, uint, 0444);

static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, uint, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(ple_window_grow, uint, 0444);

/* Default resets per-vcpu window every exit to ple_window. */
static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(ple_window_shrink, uint, 0444);
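/*
 * Illustrative sketch (not part of the original file): the dynamic per-vCPU
 * PLE window is scaled using the parameters above by the PAUSE-loop exit
 * handling code elsewhere in this file, roughly along the lines of:
 *
 *   on an unproductive PAUSE loop:
 *       new_window = min(window * ple_window_grow, ple_window_max);
 *   when the vCPU actually halts:
 *       shrink the window back toward ple_window (the default shrink value
 *       resets it outright, per the comment above).
 *
 * e.g. with a grow factor of 2, a window of 4096 cycles becomes 8192 on the
 * next PAUSE-loop exit, then 16384, and is capped at ple_window_max.
 */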
/* Default is to compute the maximum so we can never overflow. */
static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);

/* Default is SYSTEM mode, 1 for host-guest mode */
int __read_mostly pt_mode = PT_MODE_SYSTEM;
module_param(pt_mode, int, S_IRUGO);

static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
static DEFINE_MUTEX(vmx_l1d_flush_mutex);

/* Storage for pre module init parameter parsing */
static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;

static const struct {
	const char *option;
	bool for_parse;
} vmentry_l1d_param[] = {
	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};

#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;

static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
	struct page *page;
	unsigned int i;

	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
		return 0;
	}

	if (!enable_ept) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
		return 0;
	}

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
		u64 msr;

		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
		if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
			l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
			return 0;
		}
	}

	/* If set to auto use the default l1tf mitigation method */
	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
		switch (l1tf_mitigation) {
		case L1TF_MITIGATION_OFF:
			l1tf = VMENTER_L1D_FLUSH_NEVER;
			break;
		case L1TF_MITIGATION_FLUSH_NOWARN:
		case L1TF_MITIGATION_FLUSH:
		case L1TF_MITIGATION_FLUSH_NOSMT:
			l1tf = VMENTER_L1D_FLUSH_COND;
			break;
		case L1TF_MITIGATION_FULL:
		case L1TF_MITIGATION_FULL_FORCE:
			l1tf = VMENTER_L1D_FLUSH_ALWAYS;
			break;
		}
	} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
		l1tf = VMENTER_L1D_FLUSH_ALWAYS;
	}

	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
		/*
		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
		 * lifetime and so should not be charged to a memcg.
		 */
		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
		if (!page)
			return -ENOMEM;
		vmx_l1d_flush_pages = page_address(page);

		/*
		 * Initialize each page with a different pattern in
		 * order to protect against KSM in the nested
		 * virtualization case.
292 */ 293 for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) { 294 memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1, 295 PAGE_SIZE); 296 } 297 } 298 299 l1tf_vmx_mitigation = l1tf; 300 301 if (l1tf != VMENTER_L1D_FLUSH_NEVER) 302 static_branch_enable(&vmx_l1d_should_flush); 303 else 304 static_branch_disable(&vmx_l1d_should_flush); 305 306 if (l1tf == VMENTER_L1D_FLUSH_COND) 307 static_branch_enable(&vmx_l1d_flush_cond); 308 else 309 static_branch_disable(&vmx_l1d_flush_cond); 310 return 0; 311 } 312 313 static int vmentry_l1d_flush_parse(const char *s) 314 { 315 unsigned int i; 316 317 if (s) { 318 for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) { 319 if (vmentry_l1d_param[i].for_parse && 320 sysfs_streq(s, vmentry_l1d_param[i].option)) 321 return i; 322 } 323 } 324 return -EINVAL; 325 } 326 327 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) 328 { 329 int l1tf, ret; 330 331 l1tf = vmentry_l1d_flush_parse(s); 332 if (l1tf < 0) 333 return l1tf; 334 335 if (!boot_cpu_has(X86_BUG_L1TF)) 336 return 0; 337 338 /* 339 * Has vmx_init() run already? If not then this is the pre init 340 * parameter parsing. In that case just store the value and let 341 * vmx_init() do the proper setup after enable_ept has been 342 * established. 343 */ 344 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) { 345 vmentry_l1d_flush_param = l1tf; 346 return 0; 347 } 348 349 mutex_lock(&vmx_l1d_flush_mutex); 350 ret = vmx_setup_l1d_flush(l1tf); 351 mutex_unlock(&vmx_l1d_flush_mutex); 352 return ret; 353 } 354 355 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) 356 { 357 if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param))) 358 return sprintf(s, "???\n"); 359 360 return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); 361 } 362 363 static const struct kernel_param_ops vmentry_l1d_flush_ops = { 364 .set = vmentry_l1d_flush_set, 365 .get = vmentry_l1d_flush_get, 366 }; 367 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); 368 369 static u32 vmx_segment_access_rights(struct kvm_segment *var); 370 371 void vmx_vmexit(void); 372 373 #define vmx_insn_failed(fmt...) \ 374 do { \ 375 WARN_ONCE(1, fmt); \ 376 pr_warn_ratelimited(fmt); \ 377 } while (0) 378 379 asmlinkage void vmread_error(unsigned long field, bool fault) 380 { 381 if (fault) 382 kvm_spurious_fault(); 383 else 384 vmx_insn_failed("kvm: vmread failed: field=%lx\n", field); 385 } 386 387 noinline void vmwrite_error(unsigned long field, unsigned long value) 388 { 389 vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n", 390 field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); 391 } 392 393 noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr) 394 { 395 vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr); 396 } 397 398 noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr) 399 { 400 vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr); 401 } 402 403 noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva) 404 { 405 vmx_insn_failed("kvm: invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n", 406 ext, vpid, gva); 407 } 408 409 noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa) 410 { 411 vmx_insn_failed("kvm: invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n", 412 ext, eptp, gpa); 413 } 414 415 static DEFINE_PER_CPU(struct vmcs *, vmxarea); 416 DEFINE_PER_CPU(struct vmcs *, current_vmcs); 417 /* 418 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. 

static u32 vmx_segment_access_rights(struct kvm_segment *var);

void vmx_vmexit(void);

#define vmx_insn_failed(fmt...)		\
do {					\
	WARN_ONCE(1, fmt);		\
	pr_warn_ratelimited(fmt);	\
} while (0)

asmlinkage void vmread_error(unsigned long field, bool fault)
{
	if (fault)
		kvm_spurious_fault();
	else
		vmx_insn_failed("kvm: vmread failed: field=%lx\n", field);
}

noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",
			field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
}

noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr);
}

noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr);
}

noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
{
	vmx_insn_failed("kvm: invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
			ext, vpid, gva);
}

noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
{
	vmx_insn_failed("kvm: invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n",
			ext, eptp, gpa);
}

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is
 * needed when a CPU is brought down, and we need to VMCLEAR all VMCSs
 * loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

struct vmcs_config vmcs_config;
struct vmx_capability vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

static unsigned long host_idt_base;

#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);

static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
	struct hv_enlightened_vmcs *evmcs;
	struct hv_partition_assist_pg **p_hv_pa_pg =
			&to_kvm_hv(vcpu->kvm)->hv_pa_pg;
	/*
	 * Synthetic VM-Exit is not enabled in current code and so all
	 * evmcs in a single VM share the same assist page.
	 */
	if (!*p_hv_pa_pg)
		*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);

	if (!*p_hv_pa_pg)
		return -ENOMEM;

	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;

	evmcs->partition_assist_page =
		__pa(*p_hv_pa_pg);
	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;

	return 0;
}

#endif /* IS_ENABLED(CONFIG_HYPERV) */

/*
 * Comment's format: document - errata name - stepping - processor name.
 * Taken from
 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
 */
static u32 vmx_preemption_cpu_tfms[] = {
/* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
0x000206E6,
/* 323056.pdf - AAX65  - C2 - Xeon L3406 */
/* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
/* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
0x00020652,
/* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
0x00020655,
/* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
/* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
/*
 * 320767.pdf - AAP86  - B1 -
 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
 */
0x000106E5,
/* 321333.pdf - AAM126 - C0 - Xeon 3500 */
0x000106A0,
/* 321333.pdf - AAM126 - C1 - Xeon 3500 */
0x000106A1,
/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
0x000106A4,
/* 321333.pdf - AAM126 - D0 - Xeon 3500 */
/* 321324.pdf - AAK139 - D0 - Xeon 5500 */
/* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
0x000106A5,
/* Xeon E3-1220 V2 */
0x000306A8,
};

static inline bool cpu_has_broken_vmx_preemption_timer(void)
{
	u32 eax = cpuid_eax(0x00000001), i;

	/* Clear the reserved bits */
	eax &= ~(0x3U << 14 | 0xfU << 28);
	for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
		if (eax == vmx_preemption_cpu_tfms[i])
			return true;

	return false;
}

static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
{
	return flexpriority_enabled && lapic_in_kernel(vcpu);
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}

static int possible_passthrough_msr_slot(u32 msr)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
		if (vmx_possible_passthrough_msrs[i] == msr)
			return i;

	return -ENOENT;
}

static bool is_valid_passthrough_msr(u32 msr)
{
	bool r;

	switch (msr) {
	case 0x800 ... 0x8ff:
		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
		return true;
	case MSR_IA32_RTIT_STATUS:
	case MSR_IA32_RTIT_OUTPUT_BASE:
	case MSR_IA32_RTIT_OUTPUT_MASK:
	case MSR_IA32_RTIT_CR3_MATCH:
	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
		/* PT MSRs. These are handled in pt_update_intercept_for_msr() */
	case MSR_LBR_SELECT:
	case MSR_LBR_TOS:
	case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31:
	case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31:
	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
		return true;
	}

	r = possible_passthrough_msr_slot(msr) != -ENOENT;

	WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);

	return r;
}

struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = kvm_find_user_return_msr(msr);
	if (i >= 0)
		return &vmx->guest_uret_msrs[i];
	return NULL;
}

static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
				  struct vmx_uret_msr *msr, u64 data)
{
	unsigned int slot = msr - vmx->guest_uret_msrs;
	int ret = 0;

	if (msr->load_into_hardware) {
		preempt_disable();
		ret = kvm_set_user_return_msr(slot, data, msr->mask);
		preempt_enable();
	}
	if (!ret)
		msr->data = data;
	return ret;
}

#ifdef CONFIG_KEXEC_CORE
static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#endif /* CONFIG_KEXEC_CORE */

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;

	vmcs_clear(loaded_vmcs->vmcs);
	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
		vmcs_clear(loaded_vmcs->shadow_vmcs);

	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * Ensure all writes to loaded_vmcs, including deleting it from its
	 * current percpu list, complete before setting loaded_vmcs->cpu to
	 * -1, otherwise a different cpu can see cpu == -1 first and add
	 * loaded_vmcs to its percpu list before it's deleted from this cpu's
	 * list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
	 */
	smp_wmb();

	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}

void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}

static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
{
	bool ret;
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);

	if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
		kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
		vmx->segment_cache.bitmask = 0;
	}
	ret = vmx->segment_cache.bitmask & mask;
	vmx->segment_cache.bitmask |= mask;
	return ret;
}

static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}

static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
	ulong *p = &vmx->segment_cache.seg[seg].base;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
	return *p;
}

static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].limit;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
	return *p;
}

static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].ar;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
	return *p;
}

void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
	/*
	 * Guest access to VMware backdoor ports could legitimately
	 * trigger #GP because of TSS I/O permission bitmap.
	 * We intercept those #GP and allow access to them anyway
	 * as VMware does.
	 */
	if (enable_vmware_backdoor)
		eb |= (1u << GP_VECTOR);
	if ((vcpu->guest_debug &
	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
		eb |= 1u << BP_VECTOR;
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (!vmx_need_pf_intercept(vcpu))
		eb &= ~(1u << PF_VECTOR);

	/* When we are running a nested L2 guest and L1 specified for it a
	 * certain exception bitmap, we must trap the same exceptions and pass
	 * them to L1. When running L2, we will only handle the exceptions
	 * specified above if L1 did not want them.
	 */
	if (is_guest_mode(vcpu))
		eb |= get_vmcs12(vcpu)->exception_bitmap;
	else {
		int mask = 0, match = 0;

		if (enable_ept && (eb & (1u << PF_VECTOR))) {
			/*
			 * If EPT is enabled, #PF is currently only intercepted
			 * if MAXPHYADDR is smaller on the guest than on the
			 * host. In that case we only care about present,
			 * non-reserved faults. For vmcs02, however, PFEC_MASK
			 * and PFEC_MATCH are set in prepare_vmcs02_rare.
			 */
			mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
			match = PFERR_PRESENT_MASK;
		}
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
	}

	/*
	 * Disabling xfd interception indicates that dynamic xfeatures
	 * might be used in the guest. Always trap #NM in this case
	 * to save guest xfd_err timely.
	 */
	if (vcpu->arch.xfd_no_write_intercept)
		eb |= (1u << NM_VECTOR);

	vmcs_write32(EXCEPTION_BITMAP, eb);
}

/*
 * Check if MSR is intercepted for currently loaded MSR bitmap.
 */
static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
{
	if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
		return true;

	return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap,
					 MSR_IA32_SPEC_CTRL);
}

static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit)
{
	vm_entry_controls_clearbit(vmx, entry);
	vm_exit_controls_clearbit(vmx, exit);
}

int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
{
	unsigned int i;

	for (i = 0; i < m->nr; ++i) {
		if (m->val[i].index == msr)
			return i;
	}
	return -ENOENT;
}

static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
	int i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer()) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl()) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
			return;
		}
		break;
	}
	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
	if (i < 0)
		goto skip_guest;
	--m->guest.nr;
	m->guest.val[i] = m->guest.val[m->guest.nr];
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);

skip_guest:
	i = vmx_find_loadstore_msr_slot(&m->host, msr);
	if (i < 0)
		return;

	--m->host.nr;
	m->host.val[i] = m->host.val[m->host.nr];
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
}

static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit,
		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
		u64 guest_val, u64 host_val)
{
	vmcs_write64(guest_val_vmcs, guest_val);
	if (host_val_vmcs != HOST_IA32_EFER)
		vmcs_write64(host_val_vmcs, host_val);
	vm_entry_controls_setbit(vmx, entry);
	vm_exit_controls_setbit(vmx, exit);
}

static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val, bool entry_only)
{
	int i, j = 0;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer()) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER,
					GUEST_IA32_EFER,
					HOST_IA32_EFER,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl()) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
					GUEST_IA32_PERF_GLOBAL_CTRL,
					HOST_IA32_PERF_GLOBAL_CTRL,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_IA32_PEBS_ENABLE:
		/* PEBS needs a quiescent period after being disabled (to write
		 * a record). Disabling PEBS through VMX MSR swapping doesn't
		 * provide that period, so a CPU could write host's record into
		 * guest's memory.
		 */
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
	}

	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
	if (!entry_only)
		j = vmx_find_loadstore_msr_slot(&m->host, msr);

	if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
	    (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	}
	if (i < 0) {
		i = m->guest.nr++;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
	}
	m->guest.val[i].index = msr;
	m->guest.val[i].value = guest_val;

	if (entry_only)
		return;

	if (j < 0) {
		j = m->host.nr++;
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
	}
	m->host.val[j].index = msr;
	m->host.val[j].value = host_val;
}

static bool update_transition_efer(struct vcpu_vmx *vmx)
{
	u64 guest_efer = vmx->vcpu.arch.efer;
	u64 ignore_bits = 0;
	int i;

	/* Shadow paging assumes NX to be available.  */
	if (!enable_ept)
		guest_efer |= EFER_NX;

	/*
	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
	 */
	ignore_bits |= EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif

	/*
	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
	 * On CPUs that support "load IA32_EFER", always switch EFER
	 * atomically, since it's faster than switching it manually.
	 */
	if (cpu_has_load_ia32_efer() ||
	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		if (guest_efer != host_efer)
			add_atomic_switch_msr(vmx, MSR_EFER,
					      guest_efer, host_efer, false);
		else
			clear_atomic_switch_msr(vmx, MSR_EFER);
		return false;
	}

	i = kvm_find_user_return_msr(MSR_EFER);
	if (i < 0)
		return false;

	clear_atomic_switch_msr(vmx, MSR_EFER);

	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;

	vmx->guest_uret_msrs[i].data = guest_efer;
	vmx->guest_uret_msrs[i].mask = ~ignore_bits;

	return true;
}
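/*
 * Illustrative summary (not part of the original source): update_transition_efer()
 * chooses between two mechanisms for giving the guest its own EFER.  When the
 * "load IA32_EFER" entry/exit controls exist (or the NX bit differs under EPT),
 * the CPU swaps EFER itself as part of the transition, either via the dedicated
 * VMCS fields or the atomic autoload lists managed by add_atomic_switch_msr()
 * above.  Otherwise EFER is treated as a user-return MSR: it is written through
 * kvm_set_user_return_msr() before entering the guest and restored to the host
 * value only when the CPU returns to userspace, which is cheaper when the
 * guest and host values differ only in bits KVM deliberately ignores.
 */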

#ifdef CONFIG_X86_32
/*
 * On 32-bit kernels, VM exits still load the FS and GS bases from the
 * VMCS rather than the segment table.  KVM uses this helper to figure
 * out the current bases to poke them into the VMCS before entry.
 */
static unsigned long segment_base(u16 selector)
{
	struct desc_struct *table;
	unsigned long v;

	if (!(selector & ~SEGMENT_RPL_MASK))
		return 0;

	table = get_current_gdt_ro();

	if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~SEGMENT_RPL_MASK))
			return 0;

		table = (struct desc_struct *)segment_base(ldt_selector);
	}
	v = get_desc_base(&table[selector >> 3]);
	return v;
}
#endif

static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
{
	return vmx_pt_mode_is_host_guest() &&
	       !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
}

static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
{
	/* The base must be 128-byte aligned and a legal physical address. */
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128);
}

static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
{
	u32 i;

	wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
	wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
	for (i = 0; i < addr_range; i++) {
		wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
		wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
	}
}

static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
{
	u32 i;

	rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
	rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
	for (i = 0; i < addr_range; i++) {
		rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
		rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
	}
}

static void pt_guest_enter(struct vcpu_vmx *vmx)
{
	if (vmx_pt_mode_is_system())
		return;

	/*
	 * GUEST_IA32_RTIT_CTL is already set in the VMCS.
	 * Save host state before VM entry.
	 */
	rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
		wrmsrl(MSR_IA32_RTIT_CTL, 0);
		pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
		pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
	}
}

static void pt_guest_exit(struct vcpu_vmx *vmx)
{
	if (vmx_pt_mode_is_system())
		return;

	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
		pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
		pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
	}

	/*
	 * KVM requires VM_EXIT_CLEAR_IA32_RTIT_CTL to expose PT to the guest,
	 * i.e. RTIT_CTL is always cleared on VM-Exit.  Restore it if necessary.
	 */
	if (vmx->pt_desc.host.ctl)
		wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
}

void vmx_set_vmcs_host_state(struct vmcs_host_state *host, unsigned long cr3,
			     u16 fs_sel, u16 gs_sel,
			     unsigned long fs_base, unsigned long gs_base)
{
	if (unlikely(cr3 != host->cr3)) {
		vmcs_writel(HOST_CR3, cr3);
		host->cr3 = cr3;
	}
	if (unlikely(fs_sel != host->fs_sel)) {
		if (!(fs_sel & 7))
			vmcs_write16(HOST_FS_SELECTOR, fs_sel);
		else
			vmcs_write16(HOST_FS_SELECTOR, 0);
		host->fs_sel = fs_sel;
	}
	if (unlikely(gs_sel != host->gs_sel)) {
		if (!(gs_sel & 7))
			vmcs_write16(HOST_GS_SELECTOR, gs_sel);
		else
			vmcs_write16(HOST_GS_SELECTOR, 0);
		host->gs_sel = gs_sel;
	}
	if (unlikely(fs_base != host->fs_base)) {
		vmcs_writel(HOST_FS_BASE, fs_base);
		host->fs_base = fs_base;
	}
	if (unlikely(gs_base != host->gs_base)) {
		vmcs_writel(HOST_GS_BASE, gs_base);
		host->gs_base = gs_base;
	}
}

void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs_host_state *host_state;
#ifdef CONFIG_X86_64
	int cpu = raw_smp_processor_id();
#endif
	unsigned long fs_base, gs_base;
	u16 fs_sel, gs_sel;
	int i;

	vmx->req_immediate_exit = false;

	/*
	 * Note that guest MSRs to be saved/restored can also be changed
	 * when guest state is loaded. This happens when guest transitions
	 * to/from long-mode by setting MSR_EFER.LMA.
	 */
	if (!vmx->guest_uret_msrs_loaded) {
		vmx->guest_uret_msrs_loaded = true;
		for (i = 0; i < kvm_nr_uret_msrs; ++i) {
			if (!vmx->guest_uret_msrs[i].load_into_hardware)
				continue;

			kvm_set_user_return_msr(i,
						vmx->guest_uret_msrs[i].data,
						vmx->guest_uret_msrs[i].mask);
		}
	}

	if (vmx->nested.need_vmcs12_to_shadow_sync)
		nested_sync_vmcs12_to_shadow(vcpu);

	if (vmx->guest_state_loaded)
		return;

	host_state = &vmx->loaded_vmcs->host_state;

	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	host_state->ldt_sel = kvm_read_ldt();

#ifdef CONFIG_X86_64
	savesegment(ds, host_state->ds_sel);
	savesegment(es, host_state->es_sel);

	gs_base = cpu_kernelmode_gs_base(cpu);
	if (likely(is_64bit_mm(current->mm))) {
		current_save_fsgs();
		fs_sel = current->thread.fsindex;
		gs_sel = current->thread.gsindex;
		fs_base = current->thread.fsbase;
		vmx->msr_host_kernel_gs_base = current->thread.gsbase;
	} else {
		savesegment(fs, fs_sel);
		savesegment(gs, gs_sel);
		fs_base = read_msr(MSR_FS_BASE);
		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
	}

	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#else
	savesegment(fs, fs_sel);
	savesegment(gs, gs_sel);
	fs_base = segment_base(fs_sel);
	gs_base = segment_base(gs_sel);
#endif

	vmx_set_vmcs_host_state(host_state, __get_current_cr3_fast(),
				fs_sel, gs_sel, fs_base, gs_base);

	vmx->guest_state_loaded = true;
}

static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
{
	struct vmcs_host_state *host_state;

	if (!vmx->guest_state_loaded)
		return;

	host_state = &vmx->loaded_vmcs->host_state;

	++vmx->vcpu.stat.host_state_reload;

#ifdef CONFIG_X86_64
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
		kvm_load_ldt(host_state->ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(host_state->gs_sel);
#else
		loadsegment(gs, host_state->gs_sel);
#endif
	}
	if (host_state->fs_sel & 7)
		loadsegment(fs, host_state->fs_sel);
#ifdef CONFIG_X86_64
	if (unlikely(host_state->ds_sel | host_state->es_sel)) {
		loadsegment(ds, host_state->ds_sel);
		loadsegment(es, host_state->es_sel);
	}
#endif
	invalidate_tss_limit();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
	load_fixmap_gdt(raw_smp_processor_id());
	vmx->guest_state_loaded = false;
	vmx->guest_uret_msrs_loaded = false;
}

#ifdef CONFIG_X86_64
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
{
	preempt_disable();
	if (vmx->guest_state_loaded)
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
	preempt_enable();
	return vmx->msr_guest_kernel_gs_base;
}

static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
{
	preempt_disable();
	if (vmx->guest_state_loaded)
		wrmsrl(MSR_KERNEL_GS_BASE, data);
	preempt_enable();
	vmx->msr_guest_kernel_gs_base = data;
}
#endif

void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
	struct vmcs *prev;

	if (!already_loaded) {
		loaded_vmcs_clear(vmx->loaded_vmcs);
		local_irq_disable();

		/*
		 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
		 * this cpu's percpu list, otherwise it may not yet be deleted
		 * from its previous cpu's percpu list.  Pairs with the
		 * smp_wmb() in __loaded_vmcs_clear().
		 */
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		local_irq_enable();
	}

	prev = per_cpu(current_vmcs, cpu);
	if (prev != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);

		/*
		 * No indirect branch prediction barrier needed when switching
		 * the active VMCS within a guest, e.g. on nested VM-Enter.
		 * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
		 */
		if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
			indirect_branch_prediction_barrier();
	}

	if (!already_loaded) {
		void *gdt = get_current_gdt_ro();

		/*
		 * Flush all EPTP/VPID contexts, the new pCPU may have stale
		 * TLB entries from its previous association with the vCPU.
		 */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.  See 22.2.4.
		 */
		vmcs_writel(HOST_TR_BASE,
			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */

		if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
			/* 22.2.3 */
			vmcs_writel(HOST_IA32_SYSENTER_ESP,
				    (unsigned long)(cpu_entry_stack(cpu) + 1));
		}

		vmx->loaded_vmcs->cpu = cpu;
	}
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);

	vmx_vcpu_pi_load(vcpu, cpu);

	vmx->host_debugctlmsr = get_debugctlmsr();
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_vcpu_pi_put(vcpu);

	vmx_prepare_switch_to_host(to_vmx(vcpu));
}

bool vmx_emulation_required(struct kvm_vcpu *vcpu)
{
	return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
}

unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long rflags, save_rflags;

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
		rflags = vmcs_readl(GUEST_RFLAGS);
		if (vmx->rmode.vm86_active) {
			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
			save_rflags = vmx->rmode.save_rflags;
			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
		}
		vmx->rflags = rflags;
	}
	return vmx->rflags;
}

void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long old_rflags;

	if (is_unrestricted_guest(vcpu)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
		vmx->rflags = rflags;
		vmcs_writel(GUEST_RFLAGS, rflags);
		return;
	}

	old_rflags = vmx_get_rflags(vcpu);
	vmx->rflags = rflags;
	if (vmx->rmode.vm86_active) {
		vmx->rmode.save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	vmcs_writel(GUEST_RFLAGS, rflags);

	if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
		vmx->emulation_required = vmx_emulation_required(vcpu);
}

static bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
{
	return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
}

u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	int ret = 0;

	if (interruptibility & GUEST_INTR_STATE_STI)
		ret |= KVM_X86_SHADOW_INT_STI;
	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
		ret |= KVM_X86_SHADOW_INT_MOV_SS;

	return ret;
}

void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	u32 interruptibility = interruptibility_old;

	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);

	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
		interruptibility |= GUEST_INTR_STATE_MOV_SS;
	else if (mask & KVM_X86_SHADOW_INT_STI)
		interruptibility |= GUEST_INTR_STATE_STI;

	if ((interruptibility != interruptibility_old))
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}

static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long value;

	/*
	 * Any MSR write that attempts to change bits marked reserved will
	 * cause a #GP fault.
	 */
	if (data & vmx->pt_desc.ctl_bitmask)
		return 1;

	/*
	 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
	 * result in a #GP unless the same write also clears TraceEn.
	 */
	if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
		((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
		return 1;

	/*
	 * A WRMSR to IA32_RTIT_CTL that sets TraceEn but clears both ToPA
	 * and FabricEn would cause #GP, if
	 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0
	 */
	if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
		!(data & RTIT_CTL_FABRIC_EN) &&
		!intel_pt_validate_cap(vmx->pt_desc.caps,
					PT_CAP_single_range_output))
		return 1;

	/*
	 * MTCFreq, CycThresh and PSBFreq encodings check: any MSR write that
	 * utilizes encodings marked reserved will cause a #GP fault.
	 */
	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
			!test_bit((data & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET, &value))
		return 1;
	value = intel_pt_validate_cap(vmx->pt_desc.caps,
						PT_CAP_cycle_thresholds);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
			!test_bit((data & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET, &value))
		return 1;
	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
			!test_bit((data & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET, &value))
		return 1;

	/*
	 * If ADDRx_CFG is reserved or the encoding is greater than 2, the
	 * write will cause a #GP fault.
	 */
	value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
	if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2))
		return 1;
	value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
	if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2))
		return 1;
	value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
	if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2))
		return 1;
	value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
	if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2))
		return 1;

	return 0;
}

static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
					void *insn, int insn_len)
{
	/*
	 * Emulation of instructions in SGX enclaves is impossible as RIP does
	 * not point at the failing instruction, and even if it did, the code
	 * stream is inaccessible.  Inject #UD instead of exiting to userspace
	 * so that guest userspace can't DoS the guest simply by triggering
	 * emulation (enclaves are CPL3 only).
	 */
	if (to_vmx(vcpu)->exit_reason.enclave_mode) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return false;
	}
	return true;
}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason;
	unsigned long rip, orig_rip;
	u32 instr_len;

	/*
	 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
	 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be
	 * set when EPT misconfig occurs.  In practice, real hardware updates
	 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors
	 * (namely Hyper-V) don't set it due to it being undefined behavior,
	 * i.e. we end up advancing IP with some random value.
	 */
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
		instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);

		/*
		 * Emulating an enclave's instructions isn't supported as KVM
		 * cannot access the enclave's memory or its true RIP, e.g. the
		 * vmcs.GUEST_RIP points at the exit point of the enclave, not
		 * the RIP that actually triggered the VM-Exit.  But, because
		 * most instructions that cause VM-Exit will #UD in an enclave,
		 * most instruction-based VM-Exits simply do not occur.
		 *
		 * There are a few exceptions, notably the debug instructions
		 * INT1ICEBRK and INT3, as they are allowed in debug enclaves
		 * and generate #DB/#BP as expected, which KVM might intercept.
		 * But again, the CPU does the dirty work and saves an instr
		 * length of zero so VMMs don't shoot themselves in the foot.
		 * WARN if KVM tries to skip a non-zero length instruction on
		 * a VM-Exit from an enclave.
		 */
		if (!instr_len)
			goto rip_updated;

		WARN(exit_reason.enclave_mode,
		     "KVM: skipping instruction after SGX enclave VM-Exit");

		orig_rip = kvm_rip_read(vcpu);
		rip = orig_rip + instr_len;
#ifdef CONFIG_X86_64
		/*
		 * We need to mask out the high 32 bits of RIP if not in 64-bit
		 * mode, but just finding out that we are in 64-bit mode is
		 * quite expensive.  Only do it if there was a carry.
		 */
		if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
			rip = (u32)rip;
#endif
		kvm_rip_write(vcpu, rip);
	} else {
		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
			return 0;
	}

rip_updated:
	/* skipping an emulated instruction also counts */
	vmx_set_interrupt_shadow(vcpu, 0);

	return 1;
}

/*
 * Recognizes a pending MTF VM-exit and records the nested state for later
 * delivery.
 */
static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!is_guest_mode(vcpu))
		return;

	/*
	 * Per the SDM, MTF takes priority over debug-trap exceptions besides
	 * T-bit traps. As instruction emulation is completed (i.e. at the
	 * instruction boundary), any #DB exception pending delivery must be a
	 * debug-trap. Record the pending MTF state to be delivered in
	 * vmx_check_nested_events().
	 */
	if (nested_cpu_has_mtf(vmcs12) &&
	    (!vcpu->arch.exception.pending ||
	     vcpu->arch.exception.nr == DB_VECTOR))
		vmx->nested.mtf_pending = true;
	else
		vmx->nested.mtf_pending = false;
}

static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	vmx_update_emulated_instruction(vcpu);
	return skip_emulated_instruction(vcpu);
}

static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure that we clear the HLT state in the VMCS.  We don't need to
	 * explicitly skip the instruction because if the HLT state is set,
	 * then the instruction is already executing and RIP has already been
	 * advanced.
	 */
	if (kvm_hlt_in_guest(vcpu->kvm) &&
	    vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
}

static void vmx_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	u32 error_code = vcpu->arch.exception.error_code;
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	kvm_deliver_exception_payload(vcpu);

	if (has_error_code) {
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (kvm_exception_is_soft(nr))
			inc_eip = vcpu->arch.event_exit_inst_len;
		kvm_inject_realmode_interrupt(vcpu, nr, inc_eip);
		return;
	}

	WARN_ON_ONCE(vmx->emulation_required);

	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	} else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);

	vmx_clear_hlt(vcpu);
}

static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
			       bool load_into_hardware)
{
	struct vmx_uret_msr *uret_msr;

	uret_msr = vmx_find_uret_msr(vmx, msr);
	if (!uret_msr)
		return;

	uret_msr->load_into_hardware = load_into_hardware;
}

/*
 * Configuring user return MSRs to automatically save, load, and restore MSRs
 * that need to be shoved into hardware when running the guest.
 * Note, omitting an MSR here does _NOT_ mean it's not emulated, only that it
 * will not be loaded into hardware when running the guest.
 */
static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx)
{
#ifdef CONFIG_X86_64
	bool load_syscall_msrs;

	/*
	 * The SYSCALL MSRs are only needed on long mode guests, and only
	 * when EFER.SCE is set.
	 */
	load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
			    (vmx->vcpu.arch.efer & EFER_SCE);

	vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
	vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
	vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
#endif
	vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));

	vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
			   guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
			   guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID));

	/*
	 * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
	 * kernel and old userspace.  If those guests run on a tsx=off host, do
	 * allow guests to use TSX_CTRL, but don't change the value in hardware
	 * so that TSX remains always disabled.
	 */
	vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));

	/*
	 * The set of MSRs to load may have changed, reload MSRs before the
	 * next VM-Enter.
	 */
	vmx->guest_uret_msrs_loaded = false;
}

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
		return vmcs12->tsc_offset;

	return 0;
}

u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
		return vmcs12->tsc_multiplier;

	return kvm_default_tsc_scaling_ratio;
}

static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	vmcs_write64(TSC_OFFSET, offset);
}

static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
{
	vmcs_write64(TSC_MULTIPLIER, multiplier);
}

/*
 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
 * all guests if the "nested" module option is off, and can also be disabled
 * for a single guest by disabling its VMX cpuid bit.
 */
bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
{
	return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
}

static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
						 uint64_t val)
{
	uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;

	return !(val & ~valid_bits);
}

static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
{
	switch (msr->index) {
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!nested)
			return 1;
		return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
	case MSR_IA32_PERF_CAPABILITIES:
		msr->data = vmx_get_perf_capabilities();
		return 0;
	default:
		return KVM_MSR_RET_INVALID;
	}
}

/*
 * Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_uret_msr *msr;
	u32 index;

	switch (msr_info->index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		msr_info->data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		msr_info->data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_KERNEL_GS_BASE:
		msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
		break;
#endif
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_info);
	case MSR_IA32_TSX_CTRL:
		if (!msr_info->host_initiated &&
		    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
			return 1;
		goto find_uret_msr;
	case MSR_IA32_UMWAIT_CONTROL:
		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
			return 1;

		msr_info->data = vmx->msr_ia32_umwait_control;
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		msr_info->data = to_vmx(vcpu)->spec_ctrl;
		break;
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	case MSR_IA32_BNDCFGS:
		if (!kvm_mpx_supported() ||
		    (!msr_info->host_initiated &&
		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
			return 1;
		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
		break;
	case MSR_IA32_MCG_EXT_CTL:
		if (!msr_info->host_initiated &&
		    !(vmx->msr_ia32_feature_control &
		      FEAT_CTL_LMCE_ENABLED))
			return 1;
		msr_info->data = vcpu->arch.mcg_ext_ctl;
		break;
	case MSR_IA32_FEAT_CTL:
		msr_info->data = vmx->msr_ia32_feature_control;
		break;
	case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
			return 1;
		msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash
			[msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!nested_vmx_allowed(vcpu))
			return 1;
		if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
				    &msr_info->data))
			return 1;
		/*
		 * Enlightened VMCS v1 doesn't have certain VMCS fields but
		 * instead of just ignoring the features, different Hyper-V
		 * versions are either trying to use them and fail or do some
		 * sanity checking and refuse to boot.  Filter all unsupported
		 * features out.
1862 */ 1863 if (!msr_info->host_initiated && 1864 vmx->nested.enlightened_vmcs_enabled) 1865 nested_evmcs_filter_control_msr(msr_info->index, 1866 &msr_info->data); 1867 break; 1868 case MSR_IA32_RTIT_CTL: 1869 if (!vmx_pt_mode_is_host_guest()) 1870 return 1; 1871 msr_info->data = vmx->pt_desc.guest.ctl; 1872 break; 1873 case MSR_IA32_RTIT_STATUS: 1874 if (!vmx_pt_mode_is_host_guest()) 1875 return 1; 1876 msr_info->data = vmx->pt_desc.guest.status; 1877 break; 1878 case MSR_IA32_RTIT_CR3_MATCH: 1879 if (!vmx_pt_mode_is_host_guest() || 1880 !intel_pt_validate_cap(vmx->pt_desc.caps, 1881 PT_CAP_cr3_filtering)) 1882 return 1; 1883 msr_info->data = vmx->pt_desc.guest.cr3_match; 1884 break; 1885 case MSR_IA32_RTIT_OUTPUT_BASE: 1886 if (!vmx_pt_mode_is_host_guest() || 1887 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1888 PT_CAP_topa_output) && 1889 !intel_pt_validate_cap(vmx->pt_desc.caps, 1890 PT_CAP_single_range_output))) 1891 return 1; 1892 msr_info->data = vmx->pt_desc.guest.output_base; 1893 break; 1894 case MSR_IA32_RTIT_OUTPUT_MASK: 1895 if (!vmx_pt_mode_is_host_guest() || 1896 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1897 PT_CAP_topa_output) && 1898 !intel_pt_validate_cap(vmx->pt_desc.caps, 1899 PT_CAP_single_range_output))) 1900 return 1; 1901 msr_info->data = vmx->pt_desc.guest.output_mask; 1902 break; 1903 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 1904 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; 1905 if (!vmx_pt_mode_is_host_guest() || 1906 (index >= 2 * vmx->pt_desc.num_address_ranges)) 1907 return 1; 1908 if (index % 2) 1909 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; 1910 else 1911 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2]; 1912 break; 1913 case MSR_IA32_DEBUGCTLMSR: 1914 msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL); 1915 break; 1916 default: 1917 find_uret_msr: 1918 msr = vmx_find_uret_msr(vmx, msr_info->index); 1919 if (msr) { 1920 msr_info->data = msr->data; 1921 break; 1922 } 1923 return kvm_get_msr_common(vcpu, msr_info); 1924 } 1925 1926 return 0; 1927 } 1928 1929 static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu, 1930 u64 data) 1931 { 1932 #ifdef CONFIG_X86_64 1933 if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) 1934 return (u32)data; 1935 #endif 1936 return (unsigned long)data; 1937 } 1938 1939 static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu) 1940 { 1941 u64 debugctl = vmx_supported_debugctl(); 1942 1943 if (!intel_pmu_lbr_is_enabled(vcpu)) 1944 debugctl &= ~DEBUGCTLMSR_LBR_MASK; 1945 1946 if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)) 1947 debugctl &= ~DEBUGCTLMSR_BUS_LOCK_DETECT; 1948 1949 return debugctl; 1950 } 1951 1952 /* 1953 * Writes msr value into the appropriate "register". 1954 * Returns 0 on success, non-0 otherwise. 1955 * Assumes vcpu_load() was already called. 
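 * Note: several cases below intentionally relax feature/CPUID checks when
 * msr_info->host_initiated is set, i.e. when the write comes from
 * userspace (KVM_SET_MSRS) rather than from the guest.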
1956 */ 1957 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1958 { 1959 struct vcpu_vmx *vmx = to_vmx(vcpu); 1960 struct vmx_uret_msr *msr; 1961 int ret = 0; 1962 u32 msr_index = msr_info->index; 1963 u64 data = msr_info->data; 1964 u32 index; 1965 1966 switch (msr_index) { 1967 case MSR_EFER: 1968 ret = kvm_set_msr_common(vcpu, msr_info); 1969 break; 1970 #ifdef CONFIG_X86_64 1971 case MSR_FS_BASE: 1972 vmx_segment_cache_clear(vmx); 1973 vmcs_writel(GUEST_FS_BASE, data); 1974 break; 1975 case MSR_GS_BASE: 1976 vmx_segment_cache_clear(vmx); 1977 vmcs_writel(GUEST_GS_BASE, data); 1978 break; 1979 case MSR_KERNEL_GS_BASE: 1980 vmx_write_guest_kernel_gs_base(vmx, data); 1981 break; 1982 case MSR_IA32_XFD: 1983 ret = kvm_set_msr_common(vcpu, msr_info); 1984 /* 1985 * Always intercepting WRMSR could incur non-negligible 1986 * overhead given xfd might be changed frequently in 1987 * guest context switch. Disable write interception 1988 * upon the first write with a non-zero value (indicating 1989 * potential usage on dynamic xfeatures). Also update 1990 * exception bitmap to trap #NM for proper virtualization 1991 * of guest xfd_err. 1992 */ 1993 if (!ret && data) { 1994 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD, 1995 MSR_TYPE_RW); 1996 vcpu->arch.xfd_no_write_intercept = true; 1997 vmx_update_exception_bitmap(vcpu); 1998 } 1999 break; 2000 #endif 2001 case MSR_IA32_SYSENTER_CS: 2002 if (is_guest_mode(vcpu)) 2003 get_vmcs12(vcpu)->guest_sysenter_cs = data; 2004 vmcs_write32(GUEST_SYSENTER_CS, data); 2005 break; 2006 case MSR_IA32_SYSENTER_EIP: 2007 if (is_guest_mode(vcpu)) { 2008 data = nested_vmx_truncate_sysenter_addr(vcpu, data); 2009 get_vmcs12(vcpu)->guest_sysenter_eip = data; 2010 } 2011 vmcs_writel(GUEST_SYSENTER_EIP, data); 2012 break; 2013 case MSR_IA32_SYSENTER_ESP: 2014 if (is_guest_mode(vcpu)) { 2015 data = nested_vmx_truncate_sysenter_addr(vcpu, data); 2016 get_vmcs12(vcpu)->guest_sysenter_esp = data; 2017 } 2018 vmcs_writel(GUEST_SYSENTER_ESP, data); 2019 break; 2020 case MSR_IA32_DEBUGCTLMSR: { 2021 u64 invalid = data & ~vcpu_supported_debugctl(vcpu); 2022 if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) { 2023 if (report_ignored_msrs) 2024 vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n", 2025 __func__, data); 2026 data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); 2027 invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); 2028 } 2029 2030 if (invalid) 2031 return 1; 2032 2033 if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls & 2034 VM_EXIT_SAVE_DEBUG_CONTROLS) 2035 get_vmcs12(vcpu)->guest_ia32_debugctl = data; 2036 2037 vmcs_write64(GUEST_IA32_DEBUGCTL, data); 2038 if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event && 2039 (data & DEBUGCTLMSR_LBR)) 2040 intel_pmu_create_guest_lbr_event(vcpu); 2041 return 0; 2042 } 2043 case MSR_IA32_BNDCFGS: 2044 if (!kvm_mpx_supported() || 2045 (!msr_info->host_initiated && 2046 !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) 2047 return 1; 2048 if (is_noncanonical_address(data & PAGE_MASK, vcpu) || 2049 (data & MSR_IA32_BNDCFGS_RSVD)) 2050 return 1; 2051 vmcs_write64(GUEST_BNDCFGS, data); 2052 break; 2053 case MSR_IA32_UMWAIT_CONTROL: 2054 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) 2055 return 1; 2056 2057 /* The reserved bit 1 and non-32 bit [63:32] should be zero */ 2058 if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) 2059 return 1; 2060 2061 vmx->msr_ia32_umwait_control = data; 2062 break; 2063 case MSR_IA32_SPEC_CTRL: 2064 if (!msr_info->host_initiated && 2065 
!guest_has_spec_ctrl_msr(vcpu)) 2066 return 1; 2067 2068 if (kvm_spec_ctrl_test_value(data)) 2069 return 1; 2070 2071 vmx->spec_ctrl = data; 2072 if (!data) 2073 break; 2074 2075 /* 2076 * For non-nested: 2077 * When it's written (to non-zero) for the first time, pass 2078 * it through. 2079 * 2080 * For nested: 2081 * The handling of the MSR bitmap for L2 guests is done in 2082 * nested_vmx_prepare_msr_bitmap. We should not touch the 2083 * vmcs02.msr_bitmap here since it gets completely overwritten 2084 * in the merging. We update the vmcs01 here for L1 as well 2085 * since it will end up touching the MSR anyway now. 2086 */ 2087 vmx_disable_intercept_for_msr(vcpu, 2088 MSR_IA32_SPEC_CTRL, 2089 MSR_TYPE_RW); 2090 break; 2091 case MSR_IA32_TSX_CTRL: 2092 if (!msr_info->host_initiated && 2093 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) 2094 return 1; 2095 if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR)) 2096 return 1; 2097 goto find_uret_msr; 2098 case MSR_IA32_PRED_CMD: 2099 if (!msr_info->host_initiated && 2100 !guest_has_pred_cmd_msr(vcpu)) 2101 return 1; 2102 2103 if (data & ~PRED_CMD_IBPB) 2104 return 1; 2105 if (!boot_cpu_has(X86_FEATURE_IBPB)) 2106 return 1; 2107 if (!data) 2108 break; 2109 2110 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); 2111 2112 /* 2113 * For non-nested: 2114 * When it's written (to non-zero) for the first time, pass 2115 * it through. 2116 * 2117 * For nested: 2118 * The handling of the MSR bitmap for L2 guests is done in 2119 * nested_vmx_prepare_msr_bitmap. We should not touch the 2120 * vmcs02.msr_bitmap here since it gets completely overwritten 2121 * in the merging. 2122 */ 2123 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W); 2124 break; 2125 case MSR_IA32_CR_PAT: 2126 if (!kvm_pat_valid(data)) 2127 return 1; 2128 2129 if (is_guest_mode(vcpu) && 2130 get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) 2131 get_vmcs12(vcpu)->guest_ia32_pat = data; 2132 2133 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 2134 vmcs_write64(GUEST_IA32_PAT, data); 2135 vcpu->arch.pat = data; 2136 break; 2137 } 2138 ret = kvm_set_msr_common(vcpu, msr_info); 2139 break; 2140 case MSR_IA32_MCG_EXT_CTL: 2141 if ((!msr_info->host_initiated && 2142 !(to_vmx(vcpu)->msr_ia32_feature_control & 2143 FEAT_CTL_LMCE_ENABLED)) || 2144 (data & ~MCG_EXT_CTL_LMCE_EN)) 2145 return 1; 2146 vcpu->arch.mcg_ext_ctl = data; 2147 break; 2148 case MSR_IA32_FEAT_CTL: 2149 if (!vmx_feature_control_msr_valid(vcpu, data) || 2150 (to_vmx(vcpu)->msr_ia32_feature_control & 2151 FEAT_CTL_LOCKED && !msr_info->host_initiated)) 2152 return 1; 2153 vmx->msr_ia32_feature_control = data; 2154 if (msr_info->host_initiated && data == 0) 2155 vmx_leave_nested(vcpu); 2156 2157 /* SGX may be enabled/disabled by guest's firmware */ 2158 vmx_write_encls_bitmap(vcpu, NULL); 2159 break; 2160 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: 2161 /* 2162 * On real hardware, the LE hash MSRs are writable before 2163 * the firmware sets bit 0 in MSR 0x7a ("activating" SGX), 2164 * at which point SGX related bits in IA32_FEATURE_CONTROL 2165 * become writable. 2166 * 2167 * KVM does not emulate SGX activation for simplicity, so 2168 * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL 2169 * is unlocked. This is technically not architectural 2170 * behavior, but it's close enough. 
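 * I.e. the check below rejects guest writes if the vCPU lacks SGX_LC in
 * CPUID, or once IA32_FEATURE_CONTROL has been locked without SGX_LC
 * enabled; host-initiated writes are always allowed.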
2171 */ 2172 if (!msr_info->host_initiated && 2173 (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) || 2174 ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) && 2175 !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED)))) 2176 return 1; 2177 vmx->msr_ia32_sgxlepubkeyhash 2178 [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data; 2179 break; 2180 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 2181 if (!msr_info->host_initiated) 2182 return 1; /* they are read-only */ 2183 if (!nested_vmx_allowed(vcpu)) 2184 return 1; 2185 return vmx_set_vmx_msr(vcpu, msr_index, data); 2186 case MSR_IA32_RTIT_CTL: 2187 if (!vmx_pt_mode_is_host_guest() || 2188 vmx_rtit_ctl_check(vcpu, data) || 2189 vmx->nested.vmxon) 2190 return 1; 2191 vmcs_write64(GUEST_IA32_RTIT_CTL, data); 2192 vmx->pt_desc.guest.ctl = data; 2193 pt_update_intercept_for_msr(vcpu); 2194 break; 2195 case MSR_IA32_RTIT_STATUS: 2196 if (!pt_can_write_msr(vmx)) 2197 return 1; 2198 if (data & MSR_IA32_RTIT_STATUS_MASK) 2199 return 1; 2200 vmx->pt_desc.guest.status = data; 2201 break; 2202 case MSR_IA32_RTIT_CR3_MATCH: 2203 if (!pt_can_write_msr(vmx)) 2204 return 1; 2205 if (!intel_pt_validate_cap(vmx->pt_desc.caps, 2206 PT_CAP_cr3_filtering)) 2207 return 1; 2208 vmx->pt_desc.guest.cr3_match = data; 2209 break; 2210 case MSR_IA32_RTIT_OUTPUT_BASE: 2211 if (!pt_can_write_msr(vmx)) 2212 return 1; 2213 if (!intel_pt_validate_cap(vmx->pt_desc.caps, 2214 PT_CAP_topa_output) && 2215 !intel_pt_validate_cap(vmx->pt_desc.caps, 2216 PT_CAP_single_range_output)) 2217 return 1; 2218 if (!pt_output_base_valid(vcpu, data)) 2219 return 1; 2220 vmx->pt_desc.guest.output_base = data; 2221 break; 2222 case MSR_IA32_RTIT_OUTPUT_MASK: 2223 if (!pt_can_write_msr(vmx)) 2224 return 1; 2225 if (!intel_pt_validate_cap(vmx->pt_desc.caps, 2226 PT_CAP_topa_output) && 2227 !intel_pt_validate_cap(vmx->pt_desc.caps, 2228 PT_CAP_single_range_output)) 2229 return 1; 2230 vmx->pt_desc.guest.output_mask = data; 2231 break; 2232 case MSR_IA32_RTIT_ADDR0_A ... 
MSR_IA32_RTIT_ADDR3_B: 2233 if (!pt_can_write_msr(vmx)) 2234 return 1; 2235 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; 2236 if (index >= 2 * vmx->pt_desc.num_address_ranges) 2237 return 1; 2238 if (is_noncanonical_address(data, vcpu)) 2239 return 1; 2240 if (index % 2) 2241 vmx->pt_desc.guest.addr_b[index / 2] = data; 2242 else 2243 vmx->pt_desc.guest.addr_a[index / 2] = data; 2244 break; 2245 case MSR_IA32_PERF_CAPABILITIES: 2246 if (data && !vcpu_to_pmu(vcpu)->version) 2247 return 1; 2248 if (data & PMU_CAP_LBR_FMT) { 2249 if ((data & PMU_CAP_LBR_FMT) != 2250 (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)) 2251 return 1; 2252 if (!intel_pmu_lbr_is_compatible(vcpu)) 2253 return 1; 2254 } 2255 ret = kvm_set_msr_common(vcpu, msr_info); 2256 break; 2257 2258 default: 2259 find_uret_msr: 2260 msr = vmx_find_uret_msr(vmx, msr_index); 2261 if (msr) 2262 ret = vmx_set_guest_uret_msr(vmx, msr, data); 2263 else 2264 ret = kvm_set_msr_common(vcpu, msr_info); 2265 } 2266 2267 return ret; 2268 } 2269 2270 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) 2271 { 2272 unsigned long guest_owned_bits; 2273 2274 kvm_register_mark_available(vcpu, reg); 2275 2276 switch (reg) { 2277 case VCPU_REGS_RSP: 2278 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); 2279 break; 2280 case VCPU_REGS_RIP: 2281 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); 2282 break; 2283 case VCPU_EXREG_PDPTR: 2284 if (enable_ept) 2285 ept_save_pdptrs(vcpu); 2286 break; 2287 case VCPU_EXREG_CR0: 2288 guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; 2289 2290 vcpu->arch.cr0 &= ~guest_owned_bits; 2291 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits; 2292 break; 2293 case VCPU_EXREG_CR3: 2294 /* 2295 * When intercepting CR3 loads, e.g. for shadowing paging, KVM's 2296 * CR3 is loaded into hardware, not the guest's CR3. 2297 */ 2298 if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING)) 2299 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 2300 break; 2301 case VCPU_EXREG_CR4: 2302 guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; 2303 2304 vcpu->arch.cr4 &= ~guest_owned_bits; 2305 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits; 2306 break; 2307 default: 2308 KVM_BUG_ON(1, vcpu->kvm); 2309 break; 2310 } 2311 } 2312 2313 static __init int cpu_has_kvm_support(void) 2314 { 2315 return cpu_has_vmx(); 2316 } 2317 2318 static __init int vmx_disabled_by_bios(void) 2319 { 2320 return !boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 2321 !boot_cpu_has(X86_FEATURE_VMX); 2322 } 2323 2324 static int kvm_cpu_vmxon(u64 vmxon_pointer) 2325 { 2326 u64 msr; 2327 2328 cr4_set_bits(X86_CR4_VMXE); 2329 2330 asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t" 2331 _ASM_EXTABLE(1b, %l[fault]) 2332 : : [vmxon_pointer] "m"(vmxon_pointer) 2333 : : fault); 2334 return 0; 2335 2336 fault: 2337 WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n", 2338 rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr); 2339 cr4_clear_bits(X86_CR4_VMXE); 2340 2341 return -EFAULT; 2342 } 2343 2344 static int hardware_enable(void) 2345 { 2346 int cpu = raw_smp_processor_id(); 2347 u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); 2348 int r; 2349 2350 if (cr4_read_shadow() & X86_CR4_VMXE) 2351 return -EBUSY; 2352 2353 /* 2354 * This can happen if we hot-added a CPU but failed to allocate 2355 * VP assist page for it. 
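 * Failing here simply leaves VMX disabled on this CPU; CPUs that do have
 * a valid VP assist page are unaffected.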
2356 */ 2357 if (static_branch_unlikely(&enable_evmcs) && 2358 !hv_get_vp_assist_page(cpu)) 2359 return -EFAULT; 2360 2361 intel_pt_handle_vmx(1); 2362 2363 r = kvm_cpu_vmxon(phys_addr); 2364 if (r) { 2365 intel_pt_handle_vmx(0); 2366 return r; 2367 } 2368 2369 if (enable_ept) 2370 ept_sync_global(); 2371 2372 return 0; 2373 } 2374 2375 static void vmclear_local_loaded_vmcss(void) 2376 { 2377 int cpu = raw_smp_processor_id(); 2378 struct loaded_vmcs *v, *n; 2379 2380 list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), 2381 loaded_vmcss_on_cpu_link) 2382 __loaded_vmcs_clear(v); 2383 } 2384 2385 static void hardware_disable(void) 2386 { 2387 vmclear_local_loaded_vmcss(); 2388 2389 if (cpu_vmxoff()) 2390 kvm_spurious_fault(); 2391 2392 intel_pt_handle_vmx(0); 2393 } 2394 2395 /* 2396 * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID 2397 * directly instead of going through cpu_has(), to ensure KVM is trapping 2398 * ENCLS whenever it's supported in hardware. It does not matter whether 2399 * the host OS supports or has enabled SGX. 2400 */ 2401 static bool cpu_has_sgx(void) 2402 { 2403 return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0)); 2404 } 2405 2406 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, 2407 u32 msr, u32 *result) 2408 { 2409 u32 vmx_msr_low, vmx_msr_high; 2410 u32 ctl = ctl_min | ctl_opt; 2411 2412 rdmsr(msr, vmx_msr_low, vmx_msr_high); 2413 2414 ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ 2415 ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ 2416 2417 /* Ensure minimum (required) set of control bits are supported. */ 2418 if (ctl_min & ~ctl) 2419 return -EIO; 2420 2421 *result = ctl; 2422 return 0; 2423 } 2424 2425 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, 2426 struct vmx_capability *vmx_cap) 2427 { 2428 u32 vmx_msr_low, vmx_msr_high; 2429 u32 min, opt, min2, opt2; 2430 u32 _pin_based_exec_control = 0; 2431 u32 _cpu_based_exec_control = 0; 2432 u32 _cpu_based_2nd_exec_control = 0; 2433 u32 _vmexit_control = 0; 2434 u32 _vmentry_control = 0; 2435 2436 memset(vmcs_conf, 0, sizeof(*vmcs_conf)); 2437 min = CPU_BASED_HLT_EXITING | 2438 #ifdef CONFIG_X86_64 2439 CPU_BASED_CR8_LOAD_EXITING | 2440 CPU_BASED_CR8_STORE_EXITING | 2441 #endif 2442 CPU_BASED_CR3_LOAD_EXITING | 2443 CPU_BASED_CR3_STORE_EXITING | 2444 CPU_BASED_UNCOND_IO_EXITING | 2445 CPU_BASED_MOV_DR_EXITING | 2446 CPU_BASED_USE_TSC_OFFSETTING | 2447 CPU_BASED_MWAIT_EXITING | 2448 CPU_BASED_MONITOR_EXITING | 2449 CPU_BASED_INVLPG_EXITING | 2450 CPU_BASED_RDPMC_EXITING; 2451 2452 opt = CPU_BASED_TPR_SHADOW | 2453 CPU_BASED_USE_MSR_BITMAPS | 2454 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 2455 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, 2456 &_cpu_based_exec_control) < 0) 2457 return -EIO; 2458 #ifdef CONFIG_X86_64 2459 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) 2460 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & 2461 ~CPU_BASED_CR8_STORE_EXITING; 2462 #endif 2463 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { 2464 min2 = 0; 2465 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2466 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2467 SECONDARY_EXEC_WBINVD_EXITING | 2468 SECONDARY_EXEC_ENABLE_VPID | 2469 SECONDARY_EXEC_ENABLE_EPT | 2470 SECONDARY_EXEC_UNRESTRICTED_GUEST | 2471 SECONDARY_EXEC_PAUSE_LOOP_EXITING | 2472 SECONDARY_EXEC_DESC | 2473 SECONDARY_EXEC_ENABLE_RDTSCP | 2474 SECONDARY_EXEC_ENABLE_INVPCID | 2475 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2476 
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2477 SECONDARY_EXEC_SHADOW_VMCS | 2478 SECONDARY_EXEC_XSAVES | 2479 SECONDARY_EXEC_RDSEED_EXITING | 2480 SECONDARY_EXEC_RDRAND_EXITING | 2481 SECONDARY_EXEC_ENABLE_PML | 2482 SECONDARY_EXEC_TSC_SCALING | 2483 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2484 SECONDARY_EXEC_PT_USE_GPA | 2485 SECONDARY_EXEC_PT_CONCEAL_VMX | 2486 SECONDARY_EXEC_ENABLE_VMFUNC | 2487 SECONDARY_EXEC_BUS_LOCK_DETECTION; 2488 if (cpu_has_sgx()) 2489 opt2 |= SECONDARY_EXEC_ENCLS_EXITING; 2490 if (adjust_vmx_controls(min2, opt2, 2491 MSR_IA32_VMX_PROCBASED_CTLS2, 2492 &_cpu_based_2nd_exec_control) < 0) 2493 return -EIO; 2494 } 2495 #ifndef CONFIG_X86_64 2496 if (!(_cpu_based_2nd_exec_control & 2497 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 2498 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; 2499 #endif 2500 2501 if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) 2502 _cpu_based_2nd_exec_control &= ~( 2503 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2504 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2505 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 2506 2507 rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, 2508 &vmx_cap->ept, &vmx_cap->vpid); 2509 2510 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { 2511 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT 2512 enabled */ 2513 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | 2514 CPU_BASED_CR3_STORE_EXITING | 2515 CPU_BASED_INVLPG_EXITING); 2516 } else if (vmx_cap->ept) { 2517 vmx_cap->ept = 0; 2518 pr_warn_once("EPT CAP should not exist if not support " 2519 "1-setting enable EPT VM-execution control\n"); 2520 } 2521 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) && 2522 vmx_cap->vpid) { 2523 vmx_cap->vpid = 0; 2524 pr_warn_once("VPID CAP should not exist if not support " 2525 "1-setting enable VPID VM-execution control\n"); 2526 } 2527 2528 min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT; 2529 #ifdef CONFIG_X86_64 2530 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; 2531 #endif 2532 opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2533 VM_EXIT_LOAD_IA32_PAT | 2534 VM_EXIT_LOAD_IA32_EFER | 2535 VM_EXIT_CLEAR_BNDCFGS | 2536 VM_EXIT_PT_CONCEAL_PIP | 2537 VM_EXIT_CLEAR_IA32_RTIT_CTL; 2538 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, 2539 &_vmexit_control) < 0) 2540 return -EIO; 2541 2542 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; 2543 opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR | 2544 PIN_BASED_VMX_PREEMPTION_TIMER; 2545 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, 2546 &_pin_based_exec_control) < 0) 2547 return -EIO; 2548 2549 if (cpu_has_broken_vmx_preemption_timer()) 2550 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 2551 if (!(_cpu_based_2nd_exec_control & 2552 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)) 2553 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; 2554 2555 min = VM_ENTRY_LOAD_DEBUG_CONTROLS; 2556 opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | 2557 VM_ENTRY_LOAD_IA32_PAT | 2558 VM_ENTRY_LOAD_IA32_EFER | 2559 VM_ENTRY_LOAD_BNDCFGS | 2560 VM_ENTRY_PT_CONCEAL_PIP | 2561 VM_ENTRY_LOAD_IA32_RTIT_CTL; 2562 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, 2563 &_vmentry_control) < 0) 2564 return -EIO; 2565 2566 /* 2567 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they 2568 * can't be used due to an errata where VM Exit may incorrectly clear 2569 * IA32_PERF_GLOBAL_CTRL[34:32]. Workaround the errata by using the 2570 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL. 
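 * The affected parts are Family 6 Nehalem/Westmere era models; see the
 * model numbers in the switch below.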
2571 */ 2572 if (boot_cpu_data.x86 == 0x6) { 2573 switch (boot_cpu_data.x86_model) { 2574 case 26: /* AAK155 */ 2575 case 30: /* AAP115 */ 2576 case 37: /* AAT100 */ 2577 case 44: /* BC86,AAY89,BD102 */ 2578 case 46: /* BA97 */ 2579 _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 2580 _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 2581 pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " 2582 "does not work properly. Using workaround\n"); 2583 break; 2584 default: 2585 break; 2586 } 2587 } 2588 2589 2590 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); 2591 2592 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ 2593 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) 2594 return -EIO; 2595 2596 #ifdef CONFIG_X86_64 2597 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ 2598 if (vmx_msr_high & (1u<<16)) 2599 return -EIO; 2600 #endif 2601 2602 /* Require Write-Back (WB) memory type for VMCS accesses. */ 2603 if (((vmx_msr_high >> 18) & 15) != 6) 2604 return -EIO; 2605 2606 vmcs_conf->size = vmx_msr_high & 0x1fff; 2607 vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; 2608 2609 vmcs_conf->revision_id = vmx_msr_low; 2610 2611 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; 2612 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; 2613 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; 2614 vmcs_conf->vmexit_ctrl = _vmexit_control; 2615 vmcs_conf->vmentry_ctrl = _vmentry_control; 2616 2617 #if IS_ENABLED(CONFIG_HYPERV) 2618 if (enlightened_vmcs) 2619 evmcs_sanitize_exec_ctrls(vmcs_conf); 2620 #endif 2621 2622 return 0; 2623 } 2624 2625 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) 2626 { 2627 int node = cpu_to_node(cpu); 2628 struct page *pages; 2629 struct vmcs *vmcs; 2630 2631 pages = __alloc_pages_node(node, flags, 0); 2632 if (!pages) 2633 return NULL; 2634 vmcs = page_address(pages); 2635 memset(vmcs, 0, vmcs_config.size); 2636 2637 /* KVM supports Enlightened VMCS v1 only */ 2638 if (static_branch_unlikely(&enable_evmcs)) 2639 vmcs->hdr.revision_id = KVM_EVMCS_VERSION; 2640 else 2641 vmcs->hdr.revision_id = vmcs_config.revision_id; 2642 2643 if (shadow) 2644 vmcs->hdr.shadow_vmcs = 1; 2645 return vmcs; 2646 } 2647 2648 void free_vmcs(struct vmcs *vmcs) 2649 { 2650 free_page((unsigned long)vmcs); 2651 } 2652 2653 /* 2654 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded 2655 */ 2656 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) 2657 { 2658 if (!loaded_vmcs->vmcs) 2659 return; 2660 loaded_vmcs_clear(loaded_vmcs); 2661 free_vmcs(loaded_vmcs->vmcs); 2662 loaded_vmcs->vmcs = NULL; 2663 if (loaded_vmcs->msr_bitmap) 2664 free_page((unsigned long)loaded_vmcs->msr_bitmap); 2665 WARN_ON(loaded_vmcs->shadow_vmcs != NULL); 2666 } 2667 2668 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) 2669 { 2670 loaded_vmcs->vmcs = alloc_vmcs(false); 2671 if (!loaded_vmcs->vmcs) 2672 return -ENOMEM; 2673 2674 vmcs_clear(loaded_vmcs->vmcs); 2675 2676 loaded_vmcs->shadow_vmcs = NULL; 2677 loaded_vmcs->hv_timer_soft_disabled = false; 2678 loaded_vmcs->cpu = -1; 2679 loaded_vmcs->launched = 0; 2680 2681 if (cpu_has_vmx_msr_bitmap()) { 2682 loaded_vmcs->msr_bitmap = (unsigned long *) 2683 __get_free_page(GFP_KERNEL_ACCOUNT); 2684 if (!loaded_vmcs->msr_bitmap) 2685 goto out_vmcs; 2686 memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); 2687 } 2688 2689 memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); 2690 memset(&loaded_vmcs->controls_shadow, 0, 2691 sizeof(struct 
vmcs_controls_shadow)); 2692 2693 return 0; 2694 2695 out_vmcs: 2696 free_loaded_vmcs(loaded_vmcs); 2697 return -ENOMEM; 2698 } 2699 2700 static void free_kvm_area(void) 2701 { 2702 int cpu; 2703 2704 for_each_possible_cpu(cpu) { 2705 free_vmcs(per_cpu(vmxarea, cpu)); 2706 per_cpu(vmxarea, cpu) = NULL; 2707 } 2708 } 2709 2710 static __init int alloc_kvm_area(void) 2711 { 2712 int cpu; 2713 2714 for_each_possible_cpu(cpu) { 2715 struct vmcs *vmcs; 2716 2717 vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL); 2718 if (!vmcs) { 2719 free_kvm_area(); 2720 return -ENOMEM; 2721 } 2722 2723 /* 2724 * When eVMCS is enabled, alloc_vmcs_cpu() sets 2725 * vmcs->revision_id to KVM_EVMCS_VERSION instead of 2726 * revision_id reported by MSR_IA32_VMX_BASIC. 2727 * 2728 * However, even though not explicitly documented by 2729 * TLFS, VMXArea passed as VMXON argument should 2730 * still be marked with revision_id reported by 2731 * physical CPU. 2732 */ 2733 if (static_branch_unlikely(&enable_evmcs)) 2734 vmcs->hdr.revision_id = vmcs_config.revision_id; 2735 2736 per_cpu(vmxarea, cpu) = vmcs; 2737 } 2738 return 0; 2739 } 2740 2741 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, 2742 struct kvm_segment *save) 2743 { 2744 if (!emulate_invalid_guest_state) { 2745 /* 2746 * CS and SS RPL should be equal during guest entry according 2747 * to VMX spec, but in reality it is not always so. Since vcpu 2748 * is in the middle of the transition from real mode to 2749 * protected mode it is safe to assume that RPL 0 is a good 2750 * default value. 2751 */ 2752 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) 2753 save->selector &= ~SEGMENT_RPL_MASK; 2754 save->dpl = save->selector & SEGMENT_RPL_MASK; 2755 save->s = 1; 2756 } 2757 __vmx_set_segment(vcpu, save, seg); 2758 } 2759 2760 static void enter_pmode(struct kvm_vcpu *vcpu) 2761 { 2762 unsigned long flags; 2763 struct vcpu_vmx *vmx = to_vmx(vcpu); 2764 2765 /* 2766 * Update real mode segment cache. It may be not up-to-date if segment 2767 * register was written while vcpu was in a guest mode. 
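 * The refreshed values are re-installed via fix_pmode_seg() further down,
 * after vm86_active has been cleared.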
2768 */ 2769 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 2770 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 2771 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 2772 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 2773 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 2774 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 2775 2776 vmx->rmode.vm86_active = 0; 2777 2778 __vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 2779 2780 flags = vmcs_readl(GUEST_RFLAGS); 2781 flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; 2782 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; 2783 vmcs_writel(GUEST_RFLAGS, flags); 2784 2785 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | 2786 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); 2787 2788 vmx_update_exception_bitmap(vcpu); 2789 2790 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 2791 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 2792 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 2793 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 2794 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 2795 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 2796 } 2797 2798 static void fix_rmode_seg(int seg, struct kvm_segment *save) 2799 { 2800 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 2801 struct kvm_segment var = *save; 2802 2803 var.dpl = 0x3; 2804 if (seg == VCPU_SREG_CS) 2805 var.type = 0x3; 2806 2807 if (!emulate_invalid_guest_state) { 2808 var.selector = var.base >> 4; 2809 var.base = var.base & 0xffff0; 2810 var.limit = 0xffff; 2811 var.g = 0; 2812 var.db = 0; 2813 var.present = 1; 2814 var.s = 1; 2815 var.l = 0; 2816 var.unusable = 0; 2817 var.type = 0x3; 2818 var.avl = 0; 2819 if (save->base & 0xf) 2820 printk_once(KERN_WARNING "kvm: segment base is not " 2821 "paragraph aligned when entering " 2822 "protected mode (seg=%d)", seg); 2823 } 2824 2825 vmcs_write16(sf->selector, var.selector); 2826 vmcs_writel(sf->base, var.base); 2827 vmcs_write32(sf->limit, var.limit); 2828 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); 2829 } 2830 2831 static void enter_rmode(struct kvm_vcpu *vcpu) 2832 { 2833 unsigned long flags; 2834 struct vcpu_vmx *vmx = to_vmx(vcpu); 2835 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); 2836 2837 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 2838 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 2839 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 2840 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 2841 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 2842 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 2843 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 2844 2845 vmx->rmode.vm86_active = 1; 2846 2847 /* 2848 * Very old userspace does not call KVM_SET_TSS_ADDR before entering 2849 * vcpu. Warn the user that an update is overdue. 
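 * Without it kvm_vmx->tss_addr is still 0, so the real-mode TSS base
 * programmed below ends up at guest physical address 0.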
2850 */ 2851 if (!kvm_vmx->tss_addr) 2852 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be " 2853 "called before entering vcpu\n"); 2854 2855 vmx_segment_cache_clear(vmx); 2856 2857 vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr); 2858 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); 2859 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 2860 2861 flags = vmcs_readl(GUEST_RFLAGS); 2862 vmx->rmode.save_rflags = flags; 2863 2864 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 2865 2866 vmcs_writel(GUEST_RFLAGS, flags); 2867 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); 2868 vmx_update_exception_bitmap(vcpu); 2869 2870 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 2871 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 2872 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 2873 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 2874 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 2875 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 2876 } 2877 2878 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) 2879 { 2880 struct vcpu_vmx *vmx = to_vmx(vcpu); 2881 struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER); 2882 2883 /* Nothing to do if hardware doesn't support EFER. */ 2884 if (!msr) 2885 return 0; 2886 2887 vcpu->arch.efer = efer; 2888 if (efer & EFER_LMA) { 2889 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2890 msr->data = efer; 2891 } else { 2892 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2893 2894 msr->data = efer & ~EFER_LME; 2895 } 2896 vmx_setup_uret_msrs(vmx); 2897 return 0; 2898 } 2899 2900 #ifdef CONFIG_X86_64 2901 2902 static void enter_lmode(struct kvm_vcpu *vcpu) 2903 { 2904 u32 guest_tr_ar; 2905 2906 vmx_segment_cache_clear(to_vmx(vcpu)); 2907 2908 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); 2909 if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { 2910 pr_debug_ratelimited("%s: tss fixup for long mode. \n", 2911 __func__); 2912 vmcs_write32(GUEST_TR_AR_BYTES, 2913 (guest_tr_ar & ~VMX_AR_TYPE_MASK) 2914 | VMX_AR_TYPE_BUSY_64_TSS); 2915 } 2916 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); 2917 } 2918 2919 static void exit_lmode(struct kvm_vcpu *vcpu) 2920 { 2921 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2922 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); 2923 } 2924 2925 #endif 2926 2927 static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu) 2928 { 2929 struct vcpu_vmx *vmx = to_vmx(vcpu); 2930 2931 /* 2932 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as 2933 * the CPU is not required to invalidate guest-physical mappings on 2934 * VM-Entry, even if VPID is disabled. Guest-physical mappings are 2935 * associated with the root EPT structure and not any particular VPID 2936 * (INVVPID also isn't required to invalidate guest-physical mappings). 
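 * Conversely, when EPT is disabled there are no guest-physical mappings
 * to invalidate, and the VPID-tagged linear mappings are flushed via the
 * INVVPID variants below.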
2937 */ 2938 if (enable_ept) { 2939 ept_sync_global(); 2940 } else if (enable_vpid) { 2941 if (cpu_has_vmx_invvpid_global()) { 2942 vpid_sync_vcpu_global(); 2943 } else { 2944 vpid_sync_vcpu_single(vmx->vpid); 2945 vpid_sync_vcpu_single(vmx->nested.vpid02); 2946 } 2947 } 2948 } 2949 2950 static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu) 2951 { 2952 if (is_guest_mode(vcpu)) 2953 return nested_get_vpid02(vcpu); 2954 return to_vmx(vcpu)->vpid; 2955 } 2956 2957 static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu) 2958 { 2959 struct kvm_mmu *mmu = vcpu->arch.mmu; 2960 u64 root_hpa = mmu->root_hpa; 2961 2962 /* No flush required if the current context is invalid. */ 2963 if (!VALID_PAGE(root_hpa)) 2964 return; 2965 2966 if (enable_ept) 2967 ept_sync_context(construct_eptp(vcpu, root_hpa, 2968 mmu->shadow_root_level)); 2969 else 2970 vpid_sync_context(vmx_get_current_vpid(vcpu)); 2971 } 2972 2973 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr) 2974 { 2975 /* 2976 * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in 2977 * vmx_flush_tlb_guest() for an explanation of why this is ok. 2978 */ 2979 vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr); 2980 } 2981 2982 static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu) 2983 { 2984 /* 2985 * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a 2986 * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are 2987 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is 2988 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed), 2989 * i.e. no explicit INVVPID is necessary. 2990 */ 2991 vpid_sync_context(vmx_get_current_vpid(vcpu)); 2992 } 2993 2994 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu) 2995 { 2996 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 2997 2998 if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR)) 2999 return; 3000 3001 if (is_pae_paging(vcpu)) { 3002 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); 3003 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); 3004 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); 3005 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); 3006 } 3007 } 3008 3009 void ept_save_pdptrs(struct kvm_vcpu *vcpu) 3010 { 3011 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 3012 3013 if (WARN_ON_ONCE(!is_pae_paging(vcpu))) 3014 return; 3015 3016 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); 3017 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 3018 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); 3019 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); 3020 3021 kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR); 3022 } 3023 3024 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \ 3025 CPU_BASED_CR3_STORE_EXITING) 3026 3027 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 3028 { 3029 struct vcpu_vmx *vmx = to_vmx(vcpu); 3030 unsigned long hw_cr0, old_cr0_pg; 3031 u32 tmp; 3032 3033 old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG); 3034 3035 hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); 3036 if (is_unrestricted_guest(vcpu)) 3037 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; 3038 else { 3039 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; 3040 if (!enable_ept) 3041 hw_cr0 |= X86_CR0_WP; 3042 3043 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) 3044 enter_pmode(vcpu); 3045 3046 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) 3047 enter_rmode(vcpu); 3048 } 3049 3050 vmcs_writel(CR0_READ_SHADOW, cr0); 3051 vmcs_writel(GUEST_CR0, hw_cr0); 3052 vcpu->arch.cr0 = cr0; 3053 kvm_register_mark_available(vcpu, VCPU_EXREG_CR0); 3054 3055 #ifdef CONFIG_X86_64 3056 if (vcpu->arch.efer & 
EFER_LME) { 3057 if (!old_cr0_pg && (cr0 & X86_CR0_PG)) 3058 enter_lmode(vcpu); 3059 else if (old_cr0_pg && !(cr0 & X86_CR0_PG)) 3060 exit_lmode(vcpu); 3061 } 3062 #endif 3063 3064 if (enable_ept && !is_unrestricted_guest(vcpu)) { 3065 /* 3066 * Ensure KVM has an up-to-date snapshot of the guest's CR3. If 3067 * the below code _enables_ CR3 exiting, vmx_cache_reg() will 3068 * (correctly) stop reading vmcs.GUEST_CR3 because it thinks 3069 * KVM's CR3 is installed. 3070 */ 3071 if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) 3072 vmx_cache_reg(vcpu, VCPU_EXREG_CR3); 3073 3074 /* 3075 * When running with EPT but not unrestricted guest, KVM must 3076 * intercept CR3 accesses when paging is _disabled_. This is 3077 * necessary because restricted guests can't actually run with 3078 * paging disabled, and so KVM stuffs its own CR3 in order to 3079 * run the guest when identity mapped page tables. 3080 * 3081 * Do _NOT_ check the old CR0.PG, e.g. to optimize away the 3082 * update, it may be stale with respect to CR3 interception, 3083 * e.g. after nested VM-Enter. 3084 * 3085 * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or 3086 * stores to forward them to L1, even if KVM does not need to 3087 * intercept them to preserve its identity mapped page tables. 3088 */ 3089 if (!(cr0 & X86_CR0_PG)) { 3090 exec_controls_setbit(vmx, CR3_EXITING_BITS); 3091 } else if (!is_guest_mode(vcpu)) { 3092 exec_controls_clearbit(vmx, CR3_EXITING_BITS); 3093 } else { 3094 tmp = exec_controls_get(vmx); 3095 tmp &= ~CR3_EXITING_BITS; 3096 tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS; 3097 exec_controls_set(vmx, tmp); 3098 } 3099 3100 /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */ 3101 if ((old_cr0_pg ^ cr0) & X86_CR0_PG) 3102 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); 3103 3104 /* 3105 * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but 3106 * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG. 3107 */ 3108 if (!(old_cr0_pg & X86_CR0_PG) && (cr0 & X86_CR0_PG)) 3109 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 3110 } 3111 3112 /* depends on vcpu->arch.cr0 to be set to a new value */ 3113 vmx->emulation_required = vmx_emulation_required(vcpu); 3114 } 3115 3116 static int vmx_get_max_tdp_level(void) 3117 { 3118 if (cpu_has_vmx_ept_5levels()) 3119 return 5; 3120 return 4; 3121 } 3122 3123 u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) 3124 { 3125 u64 eptp = VMX_EPTP_MT_WB; 3126 3127 eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; 3128 3129 if (enable_ept_ad_bits && 3130 (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) 3131 eptp |= VMX_EPTP_AD_ENABLE_BIT; 3132 eptp |= root_hpa; 3133 3134 return eptp; 3135 } 3136 3137 static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, 3138 int root_level) 3139 { 3140 struct kvm *kvm = vcpu->kvm; 3141 bool update_guest_cr3 = true; 3142 unsigned long guest_cr3; 3143 u64 eptp; 3144 3145 if (enable_ept) { 3146 eptp = construct_eptp(vcpu, root_hpa, root_level); 3147 vmcs_write64(EPT_POINTER, eptp); 3148 3149 hv_track_root_tdp(vcpu, root_hpa); 3150 3151 if (!enable_unrestricted_guest && !is_paging(vcpu)) 3152 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; 3153 else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3)) 3154 guest_cr3 = vcpu->arch.cr3; 3155 else /* vmcs.GUEST_CR3 is already up-to-date. 
*/ 3156 update_guest_cr3 = false; 3157 vmx_ept_load_pdptrs(vcpu); 3158 } else { 3159 guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu); 3160 } 3161 3162 if (update_guest_cr3) 3163 vmcs_writel(GUEST_CR3, guest_cr3); 3164 } 3165 3166 3167 static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 3168 { 3169 /* 3170 * We operate under the default treatment of SMM, so VMX cannot be 3171 * enabled under SMM. Note, whether or not VMXE is allowed at all is 3172 * handled by kvm_is_valid_cr4(). 3173 */ 3174 if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu)) 3175 return false; 3176 3177 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) 3178 return false; 3179 3180 return true; 3181 } 3182 3183 void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 3184 { 3185 unsigned long old_cr4 = vcpu->arch.cr4; 3186 struct vcpu_vmx *vmx = to_vmx(vcpu); 3187 /* 3188 * Pass through host's Machine Check Enable value to hw_cr4, which 3189 * is in force while we are in guest mode. Do not let guests control 3190 * this bit, even if host CR4.MCE == 0. 3191 */ 3192 unsigned long hw_cr4; 3193 3194 hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE); 3195 if (is_unrestricted_guest(vcpu)) 3196 hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST; 3197 else if (vmx->rmode.vm86_active) 3198 hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON; 3199 else 3200 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON; 3201 3202 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) { 3203 if (cr4 & X86_CR4_UMIP) { 3204 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC); 3205 hw_cr4 &= ~X86_CR4_UMIP; 3206 } else if (!is_guest_mode(vcpu) || 3207 !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) { 3208 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC); 3209 } 3210 } 3211 3212 vcpu->arch.cr4 = cr4; 3213 kvm_register_mark_available(vcpu, VCPU_EXREG_CR4); 3214 3215 if (!is_unrestricted_guest(vcpu)) { 3216 if (enable_ept) { 3217 if (!is_paging(vcpu)) { 3218 hw_cr4 &= ~X86_CR4_PAE; 3219 hw_cr4 |= X86_CR4_PSE; 3220 } else if (!(cr4 & X86_CR4_PAE)) { 3221 hw_cr4 &= ~X86_CR4_PAE; 3222 } 3223 } 3224 3225 /* 3226 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in 3227 * hardware. To emulate this behavior, SMEP/SMAP/PKU needs 3228 * to be manually disabled when guest switches to non-paging 3229 * mode. 3230 * 3231 * If !enable_unrestricted_guest, the CPU is always running 3232 * with CR0.PG=1 and CR4 needs to be modified. 3233 * If enable_unrestricted_guest, the CPU automatically 3234 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. 
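 * Only the value loaded into hardware is adjusted; CR4_READ_SHADOW is
 * written with the guest's requested value below, so the guest still sees
 * the bits it set.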
3235 */ 3236 if (!is_paging(vcpu)) 3237 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); 3238 } 3239 3240 vmcs_writel(CR4_READ_SHADOW, cr4); 3241 vmcs_writel(GUEST_CR4, hw_cr4); 3242 3243 if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE)) 3244 kvm_update_cpuid_runtime(vcpu); 3245 } 3246 3247 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3248 { 3249 struct vcpu_vmx *vmx = to_vmx(vcpu); 3250 u32 ar; 3251 3252 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 3253 *var = vmx->rmode.segs[seg]; 3254 if (seg == VCPU_SREG_TR 3255 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) 3256 return; 3257 var->base = vmx_read_guest_seg_base(vmx, seg); 3258 var->selector = vmx_read_guest_seg_selector(vmx, seg); 3259 return; 3260 } 3261 var->base = vmx_read_guest_seg_base(vmx, seg); 3262 var->limit = vmx_read_guest_seg_limit(vmx, seg); 3263 var->selector = vmx_read_guest_seg_selector(vmx, seg); 3264 ar = vmx_read_guest_seg_ar(vmx, seg); 3265 var->unusable = (ar >> 16) & 1; 3266 var->type = ar & 15; 3267 var->s = (ar >> 4) & 1; 3268 var->dpl = (ar >> 5) & 3; 3269 /* 3270 * Some userspaces do not preserve unusable property. Since usable 3271 * segment has to be present according to VMX spec we can use present 3272 * property to amend userspace bug by making unusable segment always 3273 * nonpresent. vmx_segment_access_rights() already marks nonpresent 3274 * segment as unusable. 3275 */ 3276 var->present = !var->unusable; 3277 var->avl = (ar >> 12) & 1; 3278 var->l = (ar >> 13) & 1; 3279 var->db = (ar >> 14) & 1; 3280 var->g = (ar >> 15) & 1; 3281 } 3282 3283 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) 3284 { 3285 struct kvm_segment s; 3286 3287 if (to_vmx(vcpu)->rmode.vm86_active) { 3288 vmx_get_segment(vcpu, &s, seg); 3289 return s.base; 3290 } 3291 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); 3292 } 3293 3294 int vmx_get_cpl(struct kvm_vcpu *vcpu) 3295 { 3296 struct vcpu_vmx *vmx = to_vmx(vcpu); 3297 3298 if (unlikely(vmx->rmode.vm86_active)) 3299 return 0; 3300 else { 3301 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); 3302 return VMX_AR_DPL(ar); 3303 } 3304 } 3305 3306 static u32 vmx_segment_access_rights(struct kvm_segment *var) 3307 { 3308 u32 ar; 3309 3310 if (var->unusable || !var->present) 3311 ar = 1 << 16; 3312 else { 3313 ar = var->type & 15; 3314 ar |= (var->s & 1) << 4; 3315 ar |= (var->dpl & 3) << 5; 3316 ar |= (var->present & 1) << 7; 3317 ar |= (var->avl & 1) << 12; 3318 ar |= (var->l & 1) << 13; 3319 ar |= (var->db & 1) << 14; 3320 ar |= (var->g & 1) << 15; 3321 } 3322 3323 return ar; 3324 } 3325 3326 void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3327 { 3328 struct vcpu_vmx *vmx = to_vmx(vcpu); 3329 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 3330 3331 vmx_segment_cache_clear(vmx); 3332 3333 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 3334 vmx->rmode.segs[seg] = *var; 3335 if (seg == VCPU_SREG_TR) 3336 vmcs_write16(sf->selector, var->selector); 3337 else if (var->s) 3338 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); 3339 return; 3340 } 3341 3342 vmcs_writel(sf->base, var->base); 3343 vmcs_write32(sf->limit, var->limit); 3344 vmcs_write16(sf->selector, var->selector); 3345 3346 /* 3347 * Fix the "Accessed" bit in AR field of segment registers for older 3348 * qemu binaries. 3349 * IA32 arch specifies that at the time of processor reset the 3350 * "Accessed" bit in the AR field of segment registers is 1. 
And qemu 3351 * is setting it to 0 in the userland code. This causes invalid guest 3352 * state vmexit when "unrestricted guest" mode is turned on. 3353 * Fix for this setup issue in cpu_reset is being pushed in the qemu 3354 * tree. Newer qemu binaries with that qemu fix would not need this 3355 * kvm hack. 3356 */ 3357 if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR)) 3358 var->type |= 0x1; /* Accessed */ 3359 3360 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); 3361 } 3362 3363 static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3364 { 3365 __vmx_set_segment(vcpu, var, seg); 3366 3367 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu); 3368 } 3369 3370 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 3371 { 3372 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); 3373 3374 *db = (ar >> 14) & 1; 3375 *l = (ar >> 13) & 1; 3376 } 3377 3378 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3379 { 3380 dt->size = vmcs_read32(GUEST_IDTR_LIMIT); 3381 dt->address = vmcs_readl(GUEST_IDTR_BASE); 3382 } 3383 3384 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3385 { 3386 vmcs_write32(GUEST_IDTR_LIMIT, dt->size); 3387 vmcs_writel(GUEST_IDTR_BASE, dt->address); 3388 } 3389 3390 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3391 { 3392 dt->size = vmcs_read32(GUEST_GDTR_LIMIT); 3393 dt->address = vmcs_readl(GUEST_GDTR_BASE); 3394 } 3395 3396 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3397 { 3398 vmcs_write32(GUEST_GDTR_LIMIT, dt->size); 3399 vmcs_writel(GUEST_GDTR_BASE, dt->address); 3400 } 3401 3402 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) 3403 { 3404 struct kvm_segment var; 3405 u32 ar; 3406 3407 vmx_get_segment(vcpu, &var, seg); 3408 var.dpl = 0x3; 3409 if (seg == VCPU_SREG_CS) 3410 var.type = 0x3; 3411 ar = vmx_segment_access_rights(&var); 3412 3413 if (var.base != (var.selector << 4)) 3414 return false; 3415 if (var.limit != 0xffff) 3416 return false; 3417 if (ar != 0xf3) 3418 return false; 3419 3420 return true; 3421 } 3422 3423 static bool code_segment_valid(struct kvm_vcpu *vcpu) 3424 { 3425 struct kvm_segment cs; 3426 unsigned int cs_rpl; 3427 3428 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 3429 cs_rpl = cs.selector & SEGMENT_RPL_MASK; 3430 3431 if (cs.unusable) 3432 return false; 3433 if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) 3434 return false; 3435 if (!cs.s) 3436 return false; 3437 if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { 3438 if (cs.dpl > cs_rpl) 3439 return false; 3440 } else { 3441 if (cs.dpl != cs_rpl) 3442 return false; 3443 } 3444 if (!cs.present) 3445 return false; 3446 3447 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ 3448 return true; 3449 } 3450 3451 static bool stack_segment_valid(struct kvm_vcpu *vcpu) 3452 { 3453 struct kvm_segment ss; 3454 unsigned int ss_rpl; 3455 3456 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 3457 ss_rpl = ss.selector & SEGMENT_RPL_MASK; 3458 3459 if (ss.unusable) 3460 return true; 3461 if (ss.type != 3 && ss.type != 7) 3462 return false; 3463 if (!ss.s) 3464 return false; 3465 if (ss.dpl != ss_rpl) /* DPL != RPL */ 3466 return false; 3467 if (!ss.present) 3468 return false; 3469 3470 return true; 3471 } 3472 3473 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) 3474 { 3475 struct kvm_segment var; 3476 unsigned int rpl; 3477 3478 vmx_get_segment(vcpu, &var, 
seg); 3479 rpl = var.selector & SEGMENT_RPL_MASK; 3480 3481 if (var.unusable) 3482 return true; 3483 if (!var.s) 3484 return false; 3485 if (!var.present) 3486 return false; 3487 if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { 3488 if (var.dpl < rpl) /* DPL < RPL */ 3489 return false; 3490 } 3491 3492 /* TODO: Add other members to kvm_segment_field to allow checking for other access 3493 * rights flags 3494 */ 3495 return true; 3496 } 3497 3498 static bool tr_valid(struct kvm_vcpu *vcpu) 3499 { 3500 struct kvm_segment tr; 3501 3502 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); 3503 3504 if (tr.unusable) 3505 return false; 3506 if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 3507 return false; 3508 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ 3509 return false; 3510 if (!tr.present) 3511 return false; 3512 3513 return true; 3514 } 3515 3516 static bool ldtr_valid(struct kvm_vcpu *vcpu) 3517 { 3518 struct kvm_segment ldtr; 3519 3520 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); 3521 3522 if (ldtr.unusable) 3523 return true; 3524 if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 3525 return false; 3526 if (ldtr.type != 2) 3527 return false; 3528 if (!ldtr.present) 3529 return false; 3530 3531 return true; 3532 } 3533 3534 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) 3535 { 3536 struct kvm_segment cs, ss; 3537 3538 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 3539 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 3540 3541 return ((cs.selector & SEGMENT_RPL_MASK) == 3542 (ss.selector & SEGMENT_RPL_MASK)); 3543 } 3544 3545 /* 3546 * Check if guest state is valid. Returns true if valid, false if 3547 * not. 3548 * We assume that registers are always usable 3549 */ 3550 bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu) 3551 { 3552 /* real mode guest state checks */ 3553 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { 3554 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) 3555 return false; 3556 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) 3557 return false; 3558 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) 3559 return false; 3560 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) 3561 return false; 3562 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) 3563 return false; 3564 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) 3565 return false; 3566 } else { 3567 /* protected mode guest state checks */ 3568 if (!cs_ss_rpl_check(vcpu)) 3569 return false; 3570 if (!code_segment_valid(vcpu)) 3571 return false; 3572 if (!stack_segment_valid(vcpu)) 3573 return false; 3574 if (!data_segment_valid(vcpu, VCPU_SREG_DS)) 3575 return false; 3576 if (!data_segment_valid(vcpu, VCPU_SREG_ES)) 3577 return false; 3578 if (!data_segment_valid(vcpu, VCPU_SREG_FS)) 3579 return false; 3580 if (!data_segment_valid(vcpu, VCPU_SREG_GS)) 3581 return false; 3582 if (!tr_valid(vcpu)) 3583 return false; 3584 if (!ldtr_valid(vcpu)) 3585 return false; 3586 } 3587 /* TODO: 3588 * - Add checks on RIP 3589 * - Add checks on RFLAGS 3590 */ 3591 3592 return true; 3593 } 3594 3595 static int init_rmode_tss(struct kvm *kvm, void __user *ua) 3596 { 3597 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 3598 u16 data; 3599 int i; 3600 3601 for (i = 0; i < 3; i++) { 3602 if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE)) 3603 return -EFAULT; 3604 } 3605 3606 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; 3607 if (__copy_to_user(ua + TSS_IOPB_BASE_OFFSET, &data, sizeof(u16))) 3608 return -EFAULT; 3609 3610 data = ~0; 3611 if (__copy_to_user(ua + 
RMODE_TSS_SIZE - 1, &data, sizeof(u8))) 3612 return -EFAULT; 3613 3614 return 0; 3615 } 3616 3617 static int init_rmode_identity_map(struct kvm *kvm) 3618 { 3619 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm); 3620 int i, r = 0; 3621 void __user *uaddr; 3622 u32 tmp; 3623 3624 /* Protect kvm_vmx->ept_identity_pagetable_done. */ 3625 mutex_lock(&kvm->slots_lock); 3626 3627 if (likely(kvm_vmx->ept_identity_pagetable_done)) 3628 goto out; 3629 3630 if (!kvm_vmx->ept_identity_map_addr) 3631 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; 3632 3633 uaddr = __x86_set_memory_region(kvm, 3634 IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 3635 kvm_vmx->ept_identity_map_addr, 3636 PAGE_SIZE); 3637 if (IS_ERR(uaddr)) { 3638 r = PTR_ERR(uaddr); 3639 goto out; 3640 } 3641 3642 /* Set up identity-mapping pagetable for EPT in real mode */ 3643 for (i = 0; i < PT32_ENT_PER_PAGE; i++) { 3644 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | 3645 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); 3646 if (__copy_to_user(uaddr + i * sizeof(tmp), &tmp, sizeof(tmp))) { 3647 r = -EFAULT; 3648 goto out; 3649 } 3650 } 3651 kvm_vmx->ept_identity_pagetable_done = true; 3652 3653 out: 3654 mutex_unlock(&kvm->slots_lock); 3655 return r; 3656 } 3657 3658 static void seg_setup(int seg) 3659 { 3660 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 3661 unsigned int ar; 3662 3663 vmcs_write16(sf->selector, 0); 3664 vmcs_writel(sf->base, 0); 3665 vmcs_write32(sf->limit, 0xffff); 3666 ar = 0x93; 3667 if (seg == VCPU_SREG_CS) 3668 ar |= 0x08; /* code segment */ 3669 3670 vmcs_write32(sf->ar_bytes, ar); 3671 } 3672 3673 static int alloc_apic_access_page(struct kvm *kvm) 3674 { 3675 struct page *page; 3676 void __user *hva; 3677 int ret = 0; 3678 3679 mutex_lock(&kvm->slots_lock); 3680 if (kvm->arch.apic_access_memslot_enabled) 3681 goto out; 3682 hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 3683 APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); 3684 if (IS_ERR(hva)) { 3685 ret = PTR_ERR(hva); 3686 goto out; 3687 } 3688 3689 page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 3690 if (is_error_page(page)) { 3691 ret = -EFAULT; 3692 goto out; 3693 } 3694 3695 /* 3696 * Do not pin the page in memory, so that memory hot-unplug 3697 * is able to migrate it. 3698 */ 3699 put_page(page); 3700 kvm->arch.apic_access_memslot_enabled = true; 3701 out: 3702 mutex_unlock(&kvm->slots_lock); 3703 return ret; 3704 } 3705 3706 int allocate_vpid(void) 3707 { 3708 int vpid; 3709 3710 if (!enable_vpid) 3711 return 0; 3712 spin_lock(&vmx_vpid_lock); 3713 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); 3714 if (vpid < VMX_NR_VPIDS) 3715 __set_bit(vpid, vmx_vpid_bitmap); 3716 else 3717 vpid = 0; 3718 spin_unlock(&vmx_vpid_lock); 3719 return vpid; 3720 } 3721 3722 void free_vpid(int vpid) 3723 { 3724 if (!enable_vpid || vpid == 0) 3725 return; 3726 spin_lock(&vmx_vpid_lock); 3727 __clear_bit(vpid, vmx_vpid_bitmap); 3728 spin_unlock(&vmx_vpid_lock); 3729 } 3730 3731 static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx) 3732 { 3733 /* 3734 * When KVM is a nested hypervisor on top of Hyper-V and uses 3735 * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR 3736 * bitmap has changed. 
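 * evmcs_touch_msr_bitmap() only marks the MSR bitmap as dirty in the
 * eVMCS clean fields so that L0 re-reads it; the bitmap contents
 * themselves are updated by the callers.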
3737 */ 3738 if (static_branch_unlikely(&enable_evmcs)) 3739 evmcs_touch_msr_bitmap(); 3740 3741 vmx->nested.force_msr_bitmap_recalc = true; 3742 } 3743 3744 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) 3745 { 3746 struct vcpu_vmx *vmx = to_vmx(vcpu); 3747 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; 3748 3749 if (!cpu_has_vmx_msr_bitmap()) 3750 return; 3751 3752 vmx_msr_bitmap_l01_changed(vmx); 3753 3754 /* 3755 * Mark the desired intercept state in shadow bitmap, this is needed 3756 * for resync when the MSR filters change. 3757 */ 3758 if (is_valid_passthrough_msr(msr)) { 3759 int idx = possible_passthrough_msr_slot(msr); 3760 3761 if (idx != -ENOENT) { 3762 if (type & MSR_TYPE_R) 3763 clear_bit(idx, vmx->shadow_msr_intercept.read); 3764 if (type & MSR_TYPE_W) 3765 clear_bit(idx, vmx->shadow_msr_intercept.write); 3766 } 3767 } 3768 3769 if ((type & MSR_TYPE_R) && 3770 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) { 3771 vmx_set_msr_bitmap_read(msr_bitmap, msr); 3772 type &= ~MSR_TYPE_R; 3773 } 3774 3775 if ((type & MSR_TYPE_W) && 3776 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) { 3777 vmx_set_msr_bitmap_write(msr_bitmap, msr); 3778 type &= ~MSR_TYPE_W; 3779 } 3780 3781 if (type & MSR_TYPE_R) 3782 vmx_clear_msr_bitmap_read(msr_bitmap, msr); 3783 3784 if (type & MSR_TYPE_W) 3785 vmx_clear_msr_bitmap_write(msr_bitmap, msr); 3786 } 3787 3788 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) 3789 { 3790 struct vcpu_vmx *vmx = to_vmx(vcpu); 3791 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; 3792 3793 if (!cpu_has_vmx_msr_bitmap()) 3794 return; 3795 3796 vmx_msr_bitmap_l01_changed(vmx); 3797 3798 /* 3799 * Mark the desired intercept state in shadow bitmap, this is needed 3800 * for resync when the MSR filter changes. 3801 */ 3802 if (is_valid_passthrough_msr(msr)) { 3803 int idx = possible_passthrough_msr_slot(msr); 3804 3805 if (idx != -ENOENT) { 3806 if (type & MSR_TYPE_R) 3807 set_bit(idx, vmx->shadow_msr_intercept.read); 3808 if (type & MSR_TYPE_W) 3809 set_bit(idx, vmx->shadow_msr_intercept.write); 3810 } 3811 } 3812 3813 if (type & MSR_TYPE_R) 3814 vmx_set_msr_bitmap_read(msr_bitmap, msr); 3815 3816 if (type & MSR_TYPE_W) 3817 vmx_set_msr_bitmap_write(msr_bitmap, msr); 3818 } 3819 3820 static void vmx_reset_x2apic_msrs(struct kvm_vcpu *vcpu, u8 mode) 3821 { 3822 unsigned long *msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; 3823 unsigned long read_intercept; 3824 int msr; 3825 3826 read_intercept = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 
0 : ~0; 3827 3828 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 3829 unsigned int read_idx = msr / BITS_PER_LONG; 3830 unsigned int write_idx = read_idx + (0x800 / sizeof(long)); 3831 3832 msr_bitmap[read_idx] = read_intercept; 3833 msr_bitmap[write_idx] = ~0ul; 3834 } 3835 } 3836 3837 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu) 3838 { 3839 struct vcpu_vmx *vmx = to_vmx(vcpu); 3840 u8 mode; 3841 3842 if (!cpu_has_vmx_msr_bitmap()) 3843 return; 3844 3845 if (cpu_has_secondary_exec_ctrls() && 3846 (secondary_exec_controls_get(vmx) & 3847 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { 3848 mode = MSR_BITMAP_MODE_X2APIC; 3849 if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) 3850 mode |= MSR_BITMAP_MODE_X2APIC_APICV; 3851 } else { 3852 mode = 0; 3853 } 3854 3855 if (mode == vmx->x2apic_msr_bitmap_mode) 3856 return; 3857 3858 vmx->x2apic_msr_bitmap_mode = mode; 3859 3860 vmx_reset_x2apic_msrs(vcpu, mode); 3861 3862 /* 3863 * TPR reads and writes can be virtualized even if virtual interrupt 3864 * delivery is not in use. 3865 */ 3866 vmx_set_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW, 3867 !(mode & MSR_BITMAP_MODE_X2APIC)); 3868 3869 if (mode & MSR_BITMAP_MODE_X2APIC_APICV) { 3870 vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW); 3871 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W); 3872 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W); 3873 } 3874 } 3875 3876 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu) 3877 { 3878 struct vcpu_vmx *vmx = to_vmx(vcpu); 3879 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); 3880 u32 i; 3881 3882 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag); 3883 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag); 3884 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag); 3885 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag); 3886 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) { 3887 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag); 3888 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag); 3889 } 3890 } 3891 3892 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) 3893 { 3894 struct vcpu_vmx *vmx = to_vmx(vcpu); 3895 void *vapic_page; 3896 u32 vppr; 3897 int rvi; 3898 3899 if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || 3900 !nested_cpu_has_vid(get_vmcs12(vcpu)) || 3901 WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn)) 3902 return false; 3903 3904 rvi = vmx_get_rvi(); 3905 3906 vapic_page = vmx->nested.virtual_apic_map.hva; 3907 vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); 3908 3909 return ((rvi & 0xf0) > (vppr & 0xf0)); 3910 } 3911 3912 static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu) 3913 { 3914 struct vcpu_vmx *vmx = to_vmx(vcpu); 3915 u32 i; 3916 3917 /* 3918 * Set intercept permissions for all potentially passed through MSRs 3919 * again. They will automatically get filtered through the MSR filter, 3920 * so we are back in sync after this. 
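 * The shadow bitmaps record the intercept state KVM itself wants for
 * each possible passthrough MSR; vmx_disable_intercept_for_msr()
 * re-checks kvm_msr_allowed() on its way down, so an MSR that the new
 * filter denies stays intercepted even when the shadow bit asks for
 * passthrough.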
3921 */ 3922 for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) { 3923 u32 msr = vmx_possible_passthrough_msrs[i]; 3924 bool read = test_bit(i, vmx->shadow_msr_intercept.read); 3925 bool write = test_bit(i, vmx->shadow_msr_intercept.write); 3926 3927 vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_R, read); 3928 vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_W, write); 3929 } 3930 3931 pt_update_intercept_for_msr(vcpu); 3932 } 3933 3934 static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, 3935 int pi_vec) 3936 { 3937 #ifdef CONFIG_SMP 3938 if (vcpu->mode == IN_GUEST_MODE) { 3939 /* 3940 * The vector of interrupt to be delivered to vcpu had 3941 * been set in PIR before this function. 3942 * 3943 * Following cases will be reached in this block, and 3944 * we always send a notification event in all cases as 3945 * explained below. 3946 * 3947 * Case 1: vcpu keeps in non-root mode. Sending a 3948 * notification event posts the interrupt to vcpu. 3949 * 3950 * Case 2: vcpu exits to root mode and is still 3951 * runnable. PIR will be synced to vIRR before the 3952 * next vcpu entry. Sending a notification event in 3953 * this case has no effect, as vcpu is not in root 3954 * mode. 3955 * 3956 * Case 3: vcpu exits to root mode and is blocked. 3957 * vcpu_block() has already synced PIR to vIRR and 3958 * never blocks vcpu if vIRR is not cleared. Therefore, 3959 * a blocked vcpu here does not wait for any requested 3960 * interrupts in PIR, and sending a notification event 3961 * which has no effect is safe here. 3962 */ 3963 3964 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); 3965 return; 3966 } 3967 #endif 3968 /* 3969 * The vCPU isn't in the guest; wake the vCPU in case it is blocking, 3970 * otherwise do nothing as KVM will grab the highest priority pending 3971 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest(). 3972 */ 3973 kvm_vcpu_wake_up(vcpu); 3974 } 3975 3976 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, 3977 int vector) 3978 { 3979 struct vcpu_vmx *vmx = to_vmx(vcpu); 3980 3981 if (is_guest_mode(vcpu) && 3982 vector == vmx->nested.posted_intr_nv) { 3983 /* 3984 * If a posted intr is not recognized by hardware, 3985 * we will accomplish it in the next vmentry. 3986 */ 3987 vmx->nested.pi_pending = true; 3988 kvm_make_request(KVM_REQ_EVENT, vcpu); 3989 3990 /* 3991 * This pairs with the smp_mb_*() after setting vcpu->mode in 3992 * vcpu_enter_guest() to guarantee the vCPU sees the event 3993 * request if triggering a posted interrupt "fails" because 3994 * vcpu->mode != IN_GUEST_MODE. The extra barrier is needed as 3995 * the smb_wmb() in kvm_make_request() only ensures everything 3996 * done before making the request is visible when the request 3997 * is visible, it doesn't ensure ordering between the store to 3998 * vcpu->requests and the load from vcpu->mode. 3999 */ 4000 smp_mb__after_atomic(); 4001 4002 /* the PIR and ON have been set by L1. */ 4003 kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR); 4004 return 0; 4005 } 4006 return -1; 4007 } 4008 /* 4009 * Send interrupt to vcpu via posted interrupt way. 4010 * 1. If target vcpu is running(non-root mode), send posted interrupt 4011 * notification to vcpu and hardware will sync PIR to vIRR atomically. 4012 * 2. If target vcpu isn't running(root mode), kick it to pick up the 4013 * interrupt from PIR in next vmentry. 
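 * In both cases the per-interrupt sequence is the same: record the
 * vector in the PIR, set the descriptor's ON (outstanding notification)
 * bit, and only then consider sending the notification, e.g.:
 *
 *   pi_test_and_set_pir(vector, &vmx->pi_desc);
 *   if (!pi_test_and_set_on(&vmx->pi_desc))
 *           kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
 *
 * If ON was already set, a previous sender owns the notification and no
 * further action is needed.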
4014 */ 4015 static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) 4016 { 4017 struct vcpu_vmx *vmx = to_vmx(vcpu); 4018 int r; 4019 4020 r = vmx_deliver_nested_posted_interrupt(vcpu, vector); 4021 if (!r) 4022 return 0; 4023 4024 if (!vcpu->arch.apicv_active) 4025 return -1; 4026 4027 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) 4028 return 0; 4029 4030 /* If a previous notification has sent the IPI, nothing to do. */ 4031 if (pi_test_and_set_on(&vmx->pi_desc)) 4032 return 0; 4033 4034 /* 4035 * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*() 4036 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is 4037 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a 4038 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE. 4039 */ 4040 kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR); 4041 return 0; 4042 } 4043 4044 /* 4045 * Set up the vmcs's constant host-state fields, i.e., host-state fields that 4046 * will not change in the lifetime of the guest. 4047 * Note that host-state that does change is set elsewhere. E.g., host-state 4048 * that is set differently for each CPU is set in vmx_vcpu_load(), not here. 4049 */ 4050 void vmx_set_constant_host_state(struct vcpu_vmx *vmx) 4051 { 4052 u32 low32, high32; 4053 unsigned long tmpl; 4054 unsigned long cr0, cr3, cr4; 4055 4056 cr0 = read_cr0(); 4057 WARN_ON(cr0 & X86_CR0_TS); 4058 vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */ 4059 4060 /* 4061 * Save the most likely value for this task's CR3 in the VMCS. 4062 * We can't use __get_current_cr3_fast() because we're not atomic. 4063 */ 4064 cr3 = __read_cr3(); 4065 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ 4066 vmx->loaded_vmcs->host_state.cr3 = cr3; 4067 4068 /* Save the most likely value for this task's CR4 in the VMCS. */ 4069 cr4 = cr4_read_shadow(); 4070 vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ 4071 vmx->loaded_vmcs->host_state.cr4 = cr4; 4072 4073 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ 4074 #ifdef CONFIG_X86_64 4075 /* 4076 * Load null selectors, so we can avoid reloading them in 4077 * vmx_prepare_switch_to_host(), in case userspace uses 4078 * the null selectors too (the expected case). 4079 */ 4080 vmcs_write16(HOST_DS_SELECTOR, 0); 4081 vmcs_write16(HOST_ES_SELECTOR, 0); 4082 #else 4083 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4084 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4085 #endif 4086 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4087 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ 4088 4089 vmcs_writel(HOST_IDTR_BASE, host_idt_base); /* 22.2.4 */ 4090 4091 vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */ 4092 4093 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); 4094 vmcs_write32(HOST_IA32_SYSENTER_CS, low32); 4095 4096 /* 4097 * SYSENTER is used for 32-bit system calls on either 32-bit or 4098 * 64-bit kernels. It is always zero If neither is allowed, otherwise 4099 * vmx_vcpu_load_vmcs loads it with the per-CPU entry stack (and may 4100 * have already done so!). 
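 * Writing it here in the other cases would be wrong: the value is
 * per-CPU, so it belongs with the rest of the per-CPU host state that
 * vmx_vcpu_load_vmcs() maintains.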
4101 */ 4102 if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32)) 4103 vmcs_writel(HOST_IA32_SYSENTER_ESP, 0); 4104 4105 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); 4106 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ 4107 4108 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { 4109 rdmsr(MSR_IA32_CR_PAT, low32, high32); 4110 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); 4111 } 4112 4113 if (cpu_has_load_ia32_efer()) 4114 vmcs_write64(HOST_IA32_EFER, host_efer); 4115 } 4116 4117 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) 4118 { 4119 struct kvm_vcpu *vcpu = &vmx->vcpu; 4120 4121 vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS & 4122 ~vcpu->arch.cr4_guest_rsvd_bits; 4123 if (!enable_ept) { 4124 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS; 4125 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS; 4126 } 4127 if (is_guest_mode(&vmx->vcpu)) 4128 vcpu->arch.cr4_guest_owned_bits &= 4129 ~get_vmcs12(vcpu)->cr4_guest_host_mask; 4130 vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits); 4131 } 4132 4133 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) 4134 { 4135 u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; 4136 4137 if (!kvm_vcpu_apicv_active(&vmx->vcpu)) 4138 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; 4139 4140 if (!enable_vnmi) 4141 pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS; 4142 4143 if (!enable_preemption_timer) 4144 pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 4145 4146 return pin_based_exec_ctrl; 4147 } 4148 4149 static u32 vmx_vmentry_ctrl(void) 4150 { 4151 u32 vmentry_ctrl = vmcs_config.vmentry_ctrl; 4152 4153 if (vmx_pt_mode_is_system()) 4154 vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP | 4155 VM_ENTRY_LOAD_IA32_RTIT_CTL); 4156 /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */ 4157 return vmentry_ctrl & 4158 ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER); 4159 } 4160 4161 static u32 vmx_vmexit_ctrl(void) 4162 { 4163 u32 vmexit_ctrl = vmcs_config.vmexit_ctrl; 4164 4165 if (vmx_pt_mode_is_system()) 4166 vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP | 4167 VM_EXIT_CLEAR_IA32_RTIT_CTL); 4168 /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */ 4169 return vmexit_ctrl & 4170 ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER); 4171 } 4172 4173 static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) 4174 { 4175 struct vcpu_vmx *vmx = to_vmx(vcpu); 4176 4177 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); 4178 if (cpu_has_secondary_exec_ctrls()) { 4179 if (kvm_vcpu_apicv_active(vcpu)) 4180 secondary_exec_controls_setbit(vmx, 4181 SECONDARY_EXEC_APIC_REGISTER_VIRT | 4182 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4183 else 4184 secondary_exec_controls_clearbit(vmx, 4185 SECONDARY_EXEC_APIC_REGISTER_VIRT | 4186 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4187 } 4188 4189 vmx_update_msr_bitmap_x2apic(vcpu); 4190 } 4191 4192 static u32 vmx_exec_control(struct vcpu_vmx *vmx) 4193 { 4194 u32 exec_control = vmcs_config.cpu_based_exec_ctrl; 4195 4196 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) 4197 exec_control &= ~CPU_BASED_MOV_DR_EXITING; 4198 4199 if (!cpu_need_tpr_shadow(&vmx->vcpu)) { 4200 exec_control &= ~CPU_BASED_TPR_SHADOW; 4201 #ifdef CONFIG_X86_64 4202 exec_control |= CPU_BASED_CR8_STORE_EXITING | 4203 CPU_BASED_CR8_LOAD_EXITING; 4204 #endif 4205 } 4206 if (!enable_ept) 4207 exec_control |= CPU_BASED_CR3_STORE_EXITING | 4208 CPU_BASED_CR3_LOAD_EXITING | 4209 CPU_BASED_INVLPG_EXITING; 4210 if 
(kvm_mwait_in_guest(vmx->vcpu.kvm)) 4211 exec_control &= ~(CPU_BASED_MWAIT_EXITING | 4212 CPU_BASED_MONITOR_EXITING); 4213 if (kvm_hlt_in_guest(vmx->vcpu.kvm)) 4214 exec_control &= ~CPU_BASED_HLT_EXITING; 4215 return exec_control; 4216 } 4217 4218 /* 4219 * Adjust a single secondary execution control bit to intercept/allow an 4220 * instruction in the guest. This is usually done based on whether or not a 4221 * feature has been exposed to the guest in order to correctly emulate faults. 4222 */ 4223 static inline void 4224 vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control, 4225 u32 control, bool enabled, bool exiting) 4226 { 4227 /* 4228 * If the control is for an opt-in feature, clear the control if the 4229 * feature is not exposed to the guest, i.e. not enabled. If the 4230 * control is opt-out, i.e. an exiting control, clear the control if 4231 * the feature _is_ exposed to the guest, i.e. exiting/interception is 4232 * disabled for the associated instruction. Note, the caller is 4233 * responsible presetting exec_control to set all supported bits. 4234 */ 4235 if (enabled == exiting) 4236 *exec_control &= ~control; 4237 4238 /* 4239 * Update the nested MSR settings so that a nested VMM can/can't set 4240 * controls for features that are/aren't exposed to the guest. 4241 */ 4242 if (nested) { 4243 if (enabled) 4244 vmx->nested.msrs.secondary_ctls_high |= control; 4245 else 4246 vmx->nested.msrs.secondary_ctls_high &= ~control; 4247 } 4248 } 4249 4250 /* 4251 * Wrapper macro for the common case of adjusting a secondary execution control 4252 * based on a single guest CPUID bit, with a dedicated feature bit. This also 4253 * verifies that the control is actually supported by KVM and hardware. 4254 */ 4255 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \ 4256 ({ \ 4257 bool __enabled; \ 4258 \ 4259 if (cpu_has_vmx_##name()) { \ 4260 __enabled = guest_cpuid_has(&(vmx)->vcpu, \ 4261 X86_FEATURE_##feat_name); \ 4262 vmx_adjust_secondary_exec_control(vmx, exec_control, \ 4263 SECONDARY_EXEC_##ctrl_name, __enabled, exiting); \ 4264 } \ 4265 }) 4266 4267 /* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. 
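 * For reference, the two wrappers below expand as, e.g.:
 *
 *   vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID)
 *     -> vmx_adjust_sec_exec_control(vmx, &exec_control, invpcid,
 *                                    INVPCID, ENABLE_INVPCID, false)
 *
 *   vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND)
 *     -> vmx_adjust_sec_exec_control(vmx, &exec_control, rdrand,
 *                                    RDRAND, RDRAND_EXITING, true)
 *
 * i.e. the feature wrapper passes exiting=false for opt-in controls and
 * the exiting wrapper passes exiting=true for opt-out controls, with
 * ENABLE_/"_EXITING" pasted onto the SECONDARY_EXEC_ control name.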
*/ 4268 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \ 4269 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false) 4270 4271 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \ 4272 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true) 4273 4274 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) 4275 { 4276 struct kvm_vcpu *vcpu = &vmx->vcpu; 4277 4278 u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; 4279 4280 if (vmx_pt_mode_is_system()) 4281 exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX); 4282 if (!cpu_need_virtualize_apic_accesses(vcpu)) 4283 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 4284 if (vmx->vpid == 0) 4285 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; 4286 if (!enable_ept) { 4287 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; 4288 enable_unrestricted_guest = 0; 4289 } 4290 if (!enable_unrestricted_guest) 4291 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 4292 if (kvm_pause_in_guest(vmx->vcpu.kvm)) 4293 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; 4294 if (!kvm_vcpu_apicv_active(vcpu)) 4295 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | 4296 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4297 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 4298 4299 /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP, 4300 * in vmx_set_cr4. */ 4301 exec_control &= ~SECONDARY_EXEC_DESC; 4302 4303 /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD 4304 (handle_vmptrld). 4305 We can NOT enable shadow_vmcs here because we don't have yet 4306 a current VMCS12 4307 */ 4308 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 4309 4310 /* 4311 * PML is enabled/disabled when dirty logging of memsmlots changes, but 4312 * it needs to be set here when dirty logging is already active, e.g. 4313 * if this vCPU was created after dirty logging was enabled. 4314 */ 4315 if (!vcpu->kvm->arch.cpu_dirty_logging_count) 4316 exec_control &= ~SECONDARY_EXEC_ENABLE_PML; 4317 4318 if (cpu_has_vmx_xsaves()) { 4319 /* Exposing XSAVES only when XSAVE is exposed */ 4320 bool xsaves_enabled = 4321 boot_cpu_has(X86_FEATURE_XSAVE) && 4322 guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && 4323 guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); 4324 4325 vcpu->arch.xsaves_enabled = xsaves_enabled; 4326 4327 vmx_adjust_secondary_exec_control(vmx, &exec_control, 4328 SECONDARY_EXEC_XSAVES, 4329 xsaves_enabled, false); 4330 } 4331 4332 /* 4333 * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either 4334 * feature is exposed to the guest. This creates a virtualization hole 4335 * if both are supported in hardware but only one is exposed to the 4336 * guest, but letting the guest execute RDTSCP or RDPID when either one 4337 * is advertised is preferable to emulating the advertised instruction 4338 * in KVM on #UD, and obviously better than incorrectly injecting #UD. 
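 * For example, if only RDTSCP is enumerated to the guest, ENABLE_RDTSCP
 * is still set and a guest that executes RDPID anyway will see it
 * succeed rather than the #UD it would get on bare metal with the same
 * CPUID; that is the virtualization hole accepted here.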
4339 */ 4340 if (cpu_has_vmx_rdtscp()) { 4341 bool rdpid_or_rdtscp_enabled = 4342 guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) || 4343 guest_cpuid_has(vcpu, X86_FEATURE_RDPID); 4344 4345 vmx_adjust_secondary_exec_control(vmx, &exec_control, 4346 SECONDARY_EXEC_ENABLE_RDTSCP, 4347 rdpid_or_rdtscp_enabled, false); 4348 } 4349 vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID); 4350 4351 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND); 4352 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED); 4353 4354 vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG, 4355 ENABLE_USR_WAIT_PAUSE, false); 4356 4357 if (!vcpu->kvm->arch.bus_lock_detection_enabled) 4358 exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION; 4359 4360 return exec_control; 4361 } 4362 4363 #define VMX_XSS_EXIT_BITMAP 0 4364 4365 static void init_vmcs(struct vcpu_vmx *vmx) 4366 { 4367 if (nested) 4368 nested_vmx_set_vmcs_shadowing_bitmap(); 4369 4370 if (cpu_has_vmx_msr_bitmap()) 4371 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); 4372 4373 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); /* 22.3.1.5 */ 4374 4375 /* Control */ 4376 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); 4377 4378 exec_controls_set(vmx, vmx_exec_control(vmx)); 4379 4380 if (cpu_has_secondary_exec_ctrls()) 4381 secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx)); 4382 4383 if (kvm_vcpu_apicv_active(&vmx->vcpu)) { 4384 vmcs_write64(EOI_EXIT_BITMAP0, 0); 4385 vmcs_write64(EOI_EXIT_BITMAP1, 0); 4386 vmcs_write64(EOI_EXIT_BITMAP2, 0); 4387 vmcs_write64(EOI_EXIT_BITMAP3, 0); 4388 4389 vmcs_write16(GUEST_INTR_STATUS, 0); 4390 4391 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); 4392 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); 4393 } 4394 4395 if (!kvm_pause_in_guest(vmx->vcpu.kvm)) { 4396 vmcs_write32(PLE_GAP, ple_gap); 4397 vmx->ple_window = ple_window; 4398 vmx->ple_window_dirty = true; 4399 } 4400 4401 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 4402 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 4403 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ 4404 4405 vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ 4406 vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ 4407 vmx_set_constant_host_state(vmx); 4408 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ 4409 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ 4410 4411 if (cpu_has_vmx_vmfunc()) 4412 vmcs_write64(VM_FUNCTION_CONTROL, 0); 4413 4414 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); 4415 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 4416 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 4417 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 4418 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 4419 4420 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) 4421 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 4422 4423 vm_exit_controls_set(vmx, vmx_vmexit_ctrl()); 4424 4425 /* 22.2.1, 20.8.1 */ 4426 vm_entry_controls_set(vmx, vmx_vmentry_ctrl()); 4427 4428 vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4429 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits); 4430 4431 set_cr4_guest_host_mask(vmx); 4432 4433 if (vmx->vpid != 0) 4434 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 4435 4436 if (cpu_has_vmx_xsaves()) 4437 vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); 4438 4439 if (enable_pml) { 4440 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); 4441 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 4442 } 4443 4444 vmx_write_encls_bitmap(&vmx->vcpu, 
NULL); 4445 4446 if (vmx_pt_mode_is_host_guest()) { 4447 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); 4448 /* Bit[6~0] are forced to 1, writes are ignored. */ 4449 vmx->pt_desc.guest.output_mask = 0x7F; 4450 vmcs_write64(GUEST_IA32_RTIT_CTL, 0); 4451 } 4452 4453 vmcs_write32(GUEST_SYSENTER_CS, 0); 4454 vmcs_writel(GUEST_SYSENTER_ESP, 0); 4455 vmcs_writel(GUEST_SYSENTER_EIP, 0); 4456 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4457 4458 if (cpu_has_vmx_tpr_shadow()) { 4459 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); 4460 if (cpu_need_tpr_shadow(&vmx->vcpu)) 4461 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 4462 __pa(vmx->vcpu.arch.apic->regs)); 4463 vmcs_write32(TPR_THRESHOLD, 0); 4464 } 4465 4466 vmx_setup_uret_msrs(vmx); 4467 } 4468 4469 static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu) 4470 { 4471 struct vcpu_vmx *vmx = to_vmx(vcpu); 4472 4473 init_vmcs(vmx); 4474 4475 if (nested) 4476 memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs)); 4477 4478 vcpu_setup_sgx_lepubkeyhash(vcpu); 4479 4480 vmx->nested.posted_intr_nv = -1; 4481 vmx->nested.vmxon_ptr = INVALID_GPA; 4482 vmx->nested.current_vmptr = INVALID_GPA; 4483 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; 4484 4485 vcpu->arch.microcode_version = 0x100000000ULL; 4486 vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED; 4487 4488 /* 4489 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR 4490 * or POSTED_INTR_WAKEUP_VECTOR. 4491 */ 4492 vmx->pi_desc.nv = POSTED_INTR_VECTOR; 4493 vmx->pi_desc.sn = 1; 4494 } 4495 4496 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 4497 { 4498 struct vcpu_vmx *vmx = to_vmx(vcpu); 4499 4500 if (!init_event) 4501 __vmx_vcpu_reset(vcpu); 4502 4503 vmx->rmode.vm86_active = 0; 4504 vmx->spec_ctrl = 0; 4505 4506 vmx->msr_ia32_umwait_control = 0; 4507 4508 vmx->hv_deadline_tsc = -1; 4509 kvm_set_cr8(vcpu, 0); 4510 4511 vmx_segment_cache_clear(vmx); 4512 kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS); 4513 4514 seg_setup(VCPU_SREG_CS); 4515 vmcs_write16(GUEST_CS_SELECTOR, 0xf000); 4516 vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); 4517 4518 seg_setup(VCPU_SREG_DS); 4519 seg_setup(VCPU_SREG_ES); 4520 seg_setup(VCPU_SREG_FS); 4521 seg_setup(VCPU_SREG_GS); 4522 seg_setup(VCPU_SREG_SS); 4523 4524 vmcs_write16(GUEST_TR_SELECTOR, 0); 4525 vmcs_writel(GUEST_TR_BASE, 0); 4526 vmcs_write32(GUEST_TR_LIMIT, 0xffff); 4527 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 4528 4529 vmcs_write16(GUEST_LDTR_SELECTOR, 0); 4530 vmcs_writel(GUEST_LDTR_BASE, 0); 4531 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); 4532 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); 4533 4534 vmcs_writel(GUEST_GDTR_BASE, 0); 4535 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); 4536 4537 vmcs_writel(GUEST_IDTR_BASE, 0); 4538 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); 4539 4540 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); 4541 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); 4542 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); 4543 if (kvm_mpx_supported()) 4544 vmcs_write64(GUEST_BNDCFGS, 0); 4545 4546 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ 4547 4548 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 4549 4550 vpid_sync_context(vmx->vpid); 4551 } 4552 4553 static void vmx_enable_irq_window(struct kvm_vcpu *vcpu) 4554 { 4555 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); 4556 } 4557 4558 static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu) 4559 { 4560 if (!enable_vnmi || 4561 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { 4562 vmx_enable_irq_window(vcpu); 
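		/*
		 * Fall back to an IRQ window: NMI-window exiting requires the
		 * virtual-NMIs control, and KVM also treats an active STI
		 * shadow as blocking NMI injection (see vmx_nmi_blocked()).
		 */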
4563 return; 4564 } 4565 4566 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); 4567 } 4568 4569 static void vmx_inject_irq(struct kvm_vcpu *vcpu) 4570 { 4571 struct vcpu_vmx *vmx = to_vmx(vcpu); 4572 uint32_t intr; 4573 int irq = vcpu->arch.interrupt.nr; 4574 4575 trace_kvm_inj_virq(irq); 4576 4577 ++vcpu->stat.irq_injections; 4578 if (vmx->rmode.vm86_active) { 4579 int inc_eip = 0; 4580 if (vcpu->arch.interrupt.soft) 4581 inc_eip = vcpu->arch.event_exit_inst_len; 4582 kvm_inject_realmode_interrupt(vcpu, irq, inc_eip); 4583 return; 4584 } 4585 intr = irq | INTR_INFO_VALID_MASK; 4586 if (vcpu->arch.interrupt.soft) { 4587 intr |= INTR_TYPE_SOFT_INTR; 4588 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 4589 vmx->vcpu.arch.event_exit_inst_len); 4590 } else 4591 intr |= INTR_TYPE_EXT_INTR; 4592 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); 4593 4594 vmx_clear_hlt(vcpu); 4595 } 4596 4597 static void vmx_inject_nmi(struct kvm_vcpu *vcpu) 4598 { 4599 struct vcpu_vmx *vmx = to_vmx(vcpu); 4600 4601 if (!enable_vnmi) { 4602 /* 4603 * Tracking the NMI-blocked state in software is built upon 4604 * finding the next open IRQ window. This, in turn, depends on 4605 * well-behaving guests: They have to keep IRQs disabled at 4606 * least as long as the NMI handler runs. Otherwise we may 4607 * cause NMI nesting, maybe breaking the guest. But as this is 4608 * highly unlikely, we can live with the residual risk. 4609 */ 4610 vmx->loaded_vmcs->soft_vnmi_blocked = 1; 4611 vmx->loaded_vmcs->vnmi_blocked_time = 0; 4612 } 4613 4614 ++vcpu->stat.nmi_injections; 4615 vmx->loaded_vmcs->nmi_known_unmasked = false; 4616 4617 if (vmx->rmode.vm86_active) { 4618 kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0); 4619 return; 4620 } 4621 4622 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 4623 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); 4624 4625 vmx_clear_hlt(vcpu); 4626 } 4627 4628 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) 4629 { 4630 struct vcpu_vmx *vmx = to_vmx(vcpu); 4631 bool masked; 4632 4633 if (!enable_vnmi) 4634 return vmx->loaded_vmcs->soft_vnmi_blocked; 4635 if (vmx->loaded_vmcs->nmi_known_unmasked) 4636 return false; 4637 masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; 4638 vmx->loaded_vmcs->nmi_known_unmasked = !masked; 4639 return masked; 4640 } 4641 4642 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) 4643 { 4644 struct vcpu_vmx *vmx = to_vmx(vcpu); 4645 4646 if (!enable_vnmi) { 4647 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { 4648 vmx->loaded_vmcs->soft_vnmi_blocked = masked; 4649 vmx->loaded_vmcs->vnmi_blocked_time = 0; 4650 } 4651 } else { 4652 vmx->loaded_vmcs->nmi_known_unmasked = !masked; 4653 if (masked) 4654 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 4655 GUEST_INTR_STATE_NMI); 4656 else 4657 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, 4658 GUEST_INTR_STATE_NMI); 4659 } 4660 } 4661 4662 bool vmx_nmi_blocked(struct kvm_vcpu *vcpu) 4663 { 4664 if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu)) 4665 return false; 4666 4667 if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) 4668 return true; 4669 4670 return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 4671 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI | 4672 GUEST_INTR_STATE_NMI)); 4673 } 4674 4675 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) 4676 { 4677 if (to_vmx(vcpu)->nested.nested_run_pending) 4678 return -EBUSY; 4679 4680 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. 
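 * Returning -EBUSY tells the common code to defer the injection; the
 * nested event machinery then delivers the NMI to L1 as a VM-Exit
 * rather than injecting it into L2.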
*/ 4681 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu)) 4682 return -EBUSY; 4683 4684 return !vmx_nmi_blocked(vcpu); 4685 } 4686 4687 bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu) 4688 { 4689 if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) 4690 return false; 4691 4692 return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) || 4693 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 4694 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); 4695 } 4696 4697 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) 4698 { 4699 if (to_vmx(vcpu)->nested.nested_run_pending) 4700 return -EBUSY; 4701 4702 /* 4703 * An IRQ must not be injected into L2 if it's supposed to VM-Exit, 4704 * e.g. if the IRQ arrived asynchronously after checking nested events. 4705 */ 4706 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) 4707 return -EBUSY; 4708 4709 return !vmx_interrupt_blocked(vcpu); 4710 } 4711 4712 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) 4713 { 4714 void __user *ret; 4715 4716 if (enable_unrestricted_guest) 4717 return 0; 4718 4719 mutex_lock(&kvm->slots_lock); 4720 ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, 4721 PAGE_SIZE * 3); 4722 mutex_unlock(&kvm->slots_lock); 4723 4724 if (IS_ERR(ret)) 4725 return PTR_ERR(ret); 4726 4727 to_kvm_vmx(kvm)->tss_addr = addr; 4728 4729 return init_rmode_tss(kvm, ret); 4730 } 4731 4732 static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) 4733 { 4734 to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr; 4735 return 0; 4736 } 4737 4738 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) 4739 { 4740 switch (vec) { 4741 case BP_VECTOR: 4742 /* 4743 * Update instruction length as we may reinject the exception 4744 * from user space while in guest debugging mode. 4745 */ 4746 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = 4747 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4748 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 4749 return false; 4750 fallthrough; 4751 case DB_VECTOR: 4752 return !(vcpu->guest_debug & 4753 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)); 4754 case DE_VECTOR: 4755 case OF_VECTOR: 4756 case BR_VECTOR: 4757 case UD_VECTOR: 4758 case DF_VECTOR: 4759 case SS_VECTOR: 4760 case GP_VECTOR: 4761 case MF_VECTOR: 4762 return true; 4763 } 4764 return false; 4765 } 4766 4767 static int handle_rmode_exception(struct kvm_vcpu *vcpu, 4768 int vec, u32 err_code) 4769 { 4770 /* 4771 * Instruction with address size override prefix opcode 0x67 4772 * Cause the #SS fault with 0 error code in VM86 mode. 4773 */ 4774 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { 4775 if (kvm_emulate_instruction(vcpu, 0)) { 4776 if (vcpu->arch.halt_request) { 4777 vcpu->arch.halt_request = 0; 4778 return kvm_emulate_halt_noskip(vcpu); 4779 } 4780 return 1; 4781 } 4782 return 0; 4783 } 4784 4785 /* 4786 * Forward all other exceptions that are valid in real mode. 4787 * FIXME: Breaks guest debugging in real mode, needs to be fixed with 4788 * the required debugging infrastructure rework. 4789 */ 4790 kvm_queue_exception(vcpu, vec); 4791 return 1; 4792 } 4793 4794 static int handle_machine_check(struct kvm_vcpu *vcpu) 4795 { 4796 /* handled by vmx_vcpu_run() */ 4797 return 1; 4798 } 4799 4800 /* 4801 * If the host has split lock detection disabled, then #AC is 4802 * unconditionally injected into the guest, which is the pre split lock 4803 * detection behaviour. 
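 * (With the feature off in hardware, a guest #AC can only be a legacy
 * alignment-check fault, so forwarding it is always correct.)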
4804 * 4805 * If the host has split lock detection enabled then #AC is 4806 * only injected into the guest when: 4807 * - Guest CPL == 3 (user mode) 4808 * - Guest has #AC detection enabled in CR0 4809 * - Guest EFLAGS has AC bit set 4810 */ 4811 bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu) 4812 { 4813 if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) 4814 return true; 4815 4816 return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) && 4817 (kvm_get_rflags(vcpu) & X86_EFLAGS_AC); 4818 } 4819 4820 static int handle_exception_nmi(struct kvm_vcpu *vcpu) 4821 { 4822 struct vcpu_vmx *vmx = to_vmx(vcpu); 4823 struct kvm_run *kvm_run = vcpu->run; 4824 u32 intr_info, ex_no, error_code; 4825 unsigned long cr2, dr6; 4826 u32 vect_info; 4827 4828 vect_info = vmx->idt_vectoring_info; 4829 intr_info = vmx_get_intr_info(vcpu); 4830 4831 if (is_machine_check(intr_info) || is_nmi(intr_info)) 4832 return 1; /* handled by handle_exception_nmi_irqoff() */ 4833 4834 /* 4835 * Queue the exception here instead of in handle_nm_fault_irqoff(). 4836 * This ensures the nested_vmx check is not skipped so vmexit can 4837 * be reflected to L1 (when it intercepts #NM) before reaching this 4838 * point. 4839 */ 4840 if (is_nm_fault(intr_info)) { 4841 kvm_queue_exception(vcpu, NM_VECTOR); 4842 return 1; 4843 } 4844 4845 if (is_invalid_opcode(intr_info)) 4846 return handle_ud(vcpu); 4847 4848 error_code = 0; 4849 if (intr_info & INTR_INFO_DELIVER_CODE_MASK) 4850 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 4851 4852 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { 4853 WARN_ON_ONCE(!enable_vmware_backdoor); 4854 4855 /* 4856 * VMware backdoor emulation on #GP interception only handles 4857 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero 4858 * error code on #GP. 4859 */ 4860 if (error_code) { 4861 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); 4862 return 1; 4863 } 4864 return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP); 4865 } 4866 4867 /* 4868 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing 4869 * MMIO, it is better to report an internal error. 4870 * See the comments in vmx_handle_exit. 4871 */ 4872 if ((vect_info & VECTORING_INFO_VALID_MASK) && 4873 !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { 4874 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 4875 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; 4876 vcpu->run->internal.ndata = 4; 4877 vcpu->run->internal.data[0] = vect_info; 4878 vcpu->run->internal.data[1] = intr_info; 4879 vcpu->run->internal.data[2] = error_code; 4880 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu; 4881 return 0; 4882 } 4883 4884 if (is_page_fault(intr_info)) { 4885 cr2 = vmx_get_exit_qual(vcpu); 4886 if (enable_ept && !vcpu->arch.apf.host_apf_flags) { 4887 /* 4888 * EPT will cause page fault only if we need to 4889 * detect illegal GPAs. 4890 */ 4891 WARN_ON_ONCE(!allow_smaller_maxphyaddr); 4892 kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code); 4893 return 1; 4894 } else 4895 return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0); 4896 } 4897 4898 ex_no = intr_info & INTR_INFO_VECTOR_MASK; 4899 4900 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) 4901 return handle_rmode_exception(vcpu, ex_no, error_code); 4902 4903 switch (ex_no) { 4904 case DB_VECTOR: 4905 dr6 = vmx_get_exit_qual(vcpu); 4906 if (!(vcpu->guest_debug & 4907 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { 4908 /* 4909 * If the #DB was due to ICEBP, a.k.a. 
INT1, skip the 4910 * instruction. ICEBP generates a trap-like #DB, but 4911 * despite its interception control being tied to #DB, 4912 * is an instruction intercept, i.e. the VM-Exit occurs 4913 * on the ICEBP itself. Note, skipping ICEBP also 4914 * clears STI and MOVSS blocking. 4915 * 4916 * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS 4917 * if single-step is enabled in RFLAGS and STI or MOVSS 4918 * blocking is active, as the CPU doesn't set the bit 4919 * on VM-Exit due to #DB interception. VM-Entry has a 4920 * consistency check that a single-step #DB is pending 4921 * in this scenario as the previous instruction cannot 4922 * have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV 4923 * don't modify RFLAGS), therefore the one instruction 4924 * delay when activating single-step breakpoints must 4925 * have already expired. Note, the CPU sets/clears BS 4926 * as appropriate for all other VM-Exits types. 4927 */ 4928 if (is_icebp(intr_info)) 4929 WARN_ON(!skip_emulated_instruction(vcpu)); 4930 else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) && 4931 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 4932 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS))) 4933 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 4934 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS); 4935 4936 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); 4937 return 1; 4938 } 4939 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; 4940 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); 4941 fallthrough; 4942 case BP_VECTOR: 4943 /* 4944 * Update instruction length as we may reinject #BP from 4945 * user space while in guest debugging mode. Reading it for 4946 * #DB as well causes no harm, it is not used in that case. 4947 */ 4948 vmx->vcpu.arch.event_exit_inst_len = 4949 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4950 kvm_run->exit_reason = KVM_EXIT_DEBUG; 4951 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); 4952 kvm_run->debug.arch.exception = ex_no; 4953 break; 4954 case AC_VECTOR: 4955 if (vmx_guest_inject_ac(vcpu)) { 4956 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); 4957 return 1; 4958 } 4959 4960 /* 4961 * Handle split lock. Depending on detection mode this will 4962 * either warn and disable split lock detection for this 4963 * task or force SIGBUS on it. 
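 * If handle_guest_split_lock() opts for SIGBUS it returns false, and
 * the fallthrough below reports the #AC to userspace as a
 * KVM_EXIT_EXCEPTION instead of resuming the guest.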
4964 */ 4965 if (handle_guest_split_lock(kvm_rip_read(vcpu))) 4966 return 1; 4967 fallthrough; 4968 default: 4969 kvm_run->exit_reason = KVM_EXIT_EXCEPTION; 4970 kvm_run->ex.exception = ex_no; 4971 kvm_run->ex.error_code = error_code; 4972 break; 4973 } 4974 return 0; 4975 } 4976 4977 static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu) 4978 { 4979 ++vcpu->stat.irq_exits; 4980 return 1; 4981 } 4982 4983 static int handle_triple_fault(struct kvm_vcpu *vcpu) 4984 { 4985 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 4986 vcpu->mmio_needed = 0; 4987 return 0; 4988 } 4989 4990 static int handle_io(struct kvm_vcpu *vcpu) 4991 { 4992 unsigned long exit_qualification; 4993 int size, in, string; 4994 unsigned port; 4995 4996 exit_qualification = vmx_get_exit_qual(vcpu); 4997 string = (exit_qualification & 16) != 0; 4998 4999 ++vcpu->stat.io_exits; 5000 5001 if (string) 5002 return kvm_emulate_instruction(vcpu, 0); 5003 5004 port = exit_qualification >> 16; 5005 size = (exit_qualification & 7) + 1; 5006 in = (exit_qualification & 8) != 0; 5007 5008 return kvm_fast_pio(vcpu, size, port, in); 5009 } 5010 5011 static void 5012 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) 5013 { 5014 /* 5015 * Patch in the VMCALL instruction: 5016 */ 5017 hypercall[0] = 0x0f; 5018 hypercall[1] = 0x01; 5019 hypercall[2] = 0xc1; 5020 } 5021 5022 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */ 5023 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) 5024 { 5025 if (is_guest_mode(vcpu)) { 5026 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5027 unsigned long orig_val = val; 5028 5029 /* 5030 * We get here when L2 changed cr0 in a way that did not change 5031 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), 5032 * but did change L0 shadowed bits. So we first calculate the 5033 * effective cr0 value that L1 would like to write into the 5034 * hardware. It consists of the L2-owned bits from the new 5035 * value combined with the L1-owned bits from L1's guest_cr0. 
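 * For example, with cr0_guest_host_mask == X86_CR0_TS (L1 owns only TS)
 * an L2 write of 'val' is turned into
 *
 *   (val & ~X86_CR0_TS) | (vmcs12->guest_cr0 & X86_CR0_TS)
 *
 * i.e. L2's new bits everywhere except TS, which keeps L1's value.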
5036 */ 5037 val = (val & ~vmcs12->cr0_guest_host_mask) | 5038 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); 5039 5040 if (!nested_guest_cr0_valid(vcpu, val)) 5041 return 1; 5042 5043 if (kvm_set_cr0(vcpu, val)) 5044 return 1; 5045 vmcs_writel(CR0_READ_SHADOW, orig_val); 5046 return 0; 5047 } else { 5048 if (to_vmx(vcpu)->nested.vmxon && 5049 !nested_host_cr0_valid(vcpu, val)) 5050 return 1; 5051 5052 return kvm_set_cr0(vcpu, val); 5053 } 5054 } 5055 5056 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) 5057 { 5058 if (is_guest_mode(vcpu)) { 5059 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5060 unsigned long orig_val = val; 5061 5062 /* analogously to handle_set_cr0 */ 5063 val = (val & ~vmcs12->cr4_guest_host_mask) | 5064 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); 5065 if (kvm_set_cr4(vcpu, val)) 5066 return 1; 5067 vmcs_writel(CR4_READ_SHADOW, orig_val); 5068 return 0; 5069 } else 5070 return kvm_set_cr4(vcpu, val); 5071 } 5072 5073 static int handle_desc(struct kvm_vcpu *vcpu) 5074 { 5075 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); 5076 return kvm_emulate_instruction(vcpu, 0); 5077 } 5078 5079 static int handle_cr(struct kvm_vcpu *vcpu) 5080 { 5081 unsigned long exit_qualification, val; 5082 int cr; 5083 int reg; 5084 int err; 5085 int ret; 5086 5087 exit_qualification = vmx_get_exit_qual(vcpu); 5088 cr = exit_qualification & 15; 5089 reg = (exit_qualification >> 8) & 15; 5090 switch ((exit_qualification >> 4) & 3) { 5091 case 0: /* mov to cr */ 5092 val = kvm_register_read(vcpu, reg); 5093 trace_kvm_cr_write(cr, val); 5094 switch (cr) { 5095 case 0: 5096 err = handle_set_cr0(vcpu, val); 5097 return kvm_complete_insn_gp(vcpu, err); 5098 case 3: 5099 WARN_ON_ONCE(enable_unrestricted_guest); 5100 5101 err = kvm_set_cr3(vcpu, val); 5102 return kvm_complete_insn_gp(vcpu, err); 5103 case 4: 5104 err = handle_set_cr4(vcpu, val); 5105 return kvm_complete_insn_gp(vcpu, err); 5106 case 8: { 5107 u8 cr8_prev = kvm_get_cr8(vcpu); 5108 u8 cr8 = (u8)val; 5109 err = kvm_set_cr8(vcpu, cr8); 5110 ret = kvm_complete_insn_gp(vcpu, err); 5111 if (lapic_in_kernel(vcpu)) 5112 return ret; 5113 if (cr8_prev <= cr8) 5114 return ret; 5115 /* 5116 * TODO: we might be squashing a 5117 * KVM_GUESTDBG_SINGLESTEP-triggered 5118 * KVM_EXIT_DEBUG here. 
5119 */ 5120 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; 5121 return 0; 5122 } 5123 } 5124 break; 5125 case 2: /* clts */ 5126 KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS"); 5127 return -EIO; 5128 case 1: /*mov from cr*/ 5129 switch (cr) { 5130 case 3: 5131 WARN_ON_ONCE(enable_unrestricted_guest); 5132 5133 val = kvm_read_cr3(vcpu); 5134 kvm_register_write(vcpu, reg, val); 5135 trace_kvm_cr_read(cr, val); 5136 return kvm_skip_emulated_instruction(vcpu); 5137 case 8: 5138 val = kvm_get_cr8(vcpu); 5139 kvm_register_write(vcpu, reg, val); 5140 trace_kvm_cr_read(cr, val); 5141 return kvm_skip_emulated_instruction(vcpu); 5142 } 5143 break; 5144 case 3: /* lmsw */ 5145 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 5146 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); 5147 kvm_lmsw(vcpu, val); 5148 5149 return kvm_skip_emulated_instruction(vcpu); 5150 default: 5151 break; 5152 } 5153 vcpu->run->exit_reason = 0; 5154 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", 5155 (int)(exit_qualification >> 4) & 3, cr); 5156 return 0; 5157 } 5158 5159 static int handle_dr(struct kvm_vcpu *vcpu) 5160 { 5161 unsigned long exit_qualification; 5162 int dr, dr7, reg; 5163 int err = 1; 5164 5165 exit_qualification = vmx_get_exit_qual(vcpu); 5166 dr = exit_qualification & DEBUG_REG_ACCESS_NUM; 5167 5168 /* First, if DR does not exist, trigger UD */ 5169 if (!kvm_require_dr(vcpu, dr)) 5170 return 1; 5171 5172 if (kvm_x86_ops.get_cpl(vcpu) > 0) 5173 goto out; 5174 5175 dr7 = vmcs_readl(GUEST_DR7); 5176 if (dr7 & DR7_GD) { 5177 /* 5178 * As the vm-exit takes precedence over the debug trap, we 5179 * need to emulate the latter, either for the host or the 5180 * guest debugging itself. 5181 */ 5182 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 5183 vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW; 5184 vcpu->run->debug.arch.dr7 = dr7; 5185 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); 5186 vcpu->run->debug.arch.exception = DB_VECTOR; 5187 vcpu->run->exit_reason = KVM_EXIT_DEBUG; 5188 return 0; 5189 } else { 5190 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD); 5191 return 1; 5192 } 5193 } 5194 5195 if (vcpu->guest_debug == 0) { 5196 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); 5197 5198 /* 5199 * No more DR vmexits; force a reload of the debug registers 5200 * and reenter on this instruction. The next vmexit will 5201 * retrieve the full state of the debug registers. 5202 */ 5203 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; 5204 return 1; 5205 } 5206 5207 reg = DEBUG_REG_ACCESS_REG(exit_qualification); 5208 if (exit_qualification & TYPE_MOV_FROM_DR) { 5209 unsigned long val; 5210 5211 kvm_get_dr(vcpu, dr, &val); 5212 kvm_register_write(vcpu, reg, val); 5213 err = 0; 5214 } else { 5215 err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg)); 5216 } 5217 5218 out: 5219 return kvm_complete_insn_gp(vcpu, err); 5220 } 5221 5222 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) 5223 { 5224 get_debugreg(vcpu->arch.db[0], 0); 5225 get_debugreg(vcpu->arch.db[1], 1); 5226 get_debugreg(vcpu->arch.db[2], 2); 5227 get_debugreg(vcpu->arch.db[3], 3); 5228 get_debugreg(vcpu->arch.dr6, 6); 5229 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); 5230 5231 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; 5232 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); 5233 5234 /* 5235 * exc_debug expects dr6 to be cleared after it runs, avoid that it sees 5236 * a stale dr6 from the guest. 
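 * DR6_RESERVED (0xffff0ff0) is the architectural "no debug events
 * pending" value, which is also what the host's #DB handler leaves in
 * DR6 after consuming an event.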
5237 */ 5238 set_debugreg(DR6_RESERVED, 6); 5239 } 5240 5241 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) 5242 { 5243 vmcs_writel(GUEST_DR7, val); 5244 } 5245 5246 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) 5247 { 5248 kvm_apic_update_ppr(vcpu); 5249 return 1; 5250 } 5251 5252 static int handle_interrupt_window(struct kvm_vcpu *vcpu) 5253 { 5254 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); 5255 5256 kvm_make_request(KVM_REQ_EVENT, vcpu); 5257 5258 ++vcpu->stat.irq_window_exits; 5259 return 1; 5260 } 5261 5262 static int handle_invlpg(struct kvm_vcpu *vcpu) 5263 { 5264 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5265 5266 kvm_mmu_invlpg(vcpu, exit_qualification); 5267 return kvm_skip_emulated_instruction(vcpu); 5268 } 5269 5270 static int handle_apic_access(struct kvm_vcpu *vcpu) 5271 { 5272 if (likely(fasteoi)) { 5273 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5274 int access_type, offset; 5275 5276 access_type = exit_qualification & APIC_ACCESS_TYPE; 5277 offset = exit_qualification & APIC_ACCESS_OFFSET; 5278 /* 5279 * Sane guest uses MOV to write EOI, with written value 5280 * not cared. So make a short-circuit here by avoiding 5281 * heavy instruction emulation. 5282 */ 5283 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && 5284 (offset == APIC_EOI)) { 5285 kvm_lapic_set_eoi(vcpu); 5286 return kvm_skip_emulated_instruction(vcpu); 5287 } 5288 } 5289 return kvm_emulate_instruction(vcpu, 0); 5290 } 5291 5292 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) 5293 { 5294 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5295 int vector = exit_qualification & 0xff; 5296 5297 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ 5298 kvm_apic_set_eoi_accelerated(vcpu, vector); 5299 return 1; 5300 } 5301 5302 static int handle_apic_write(struct kvm_vcpu *vcpu) 5303 { 5304 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5305 u32 offset = exit_qualification & 0xfff; 5306 5307 /* APIC-write VM exit is trap-like and thus no need to adjust IP */ 5308 kvm_apic_write_nodecode(vcpu, offset); 5309 return 1; 5310 } 5311 5312 static int handle_task_switch(struct kvm_vcpu *vcpu) 5313 { 5314 struct vcpu_vmx *vmx = to_vmx(vcpu); 5315 unsigned long exit_qualification; 5316 bool has_error_code = false; 5317 u32 error_code = 0; 5318 u16 tss_selector; 5319 int reason, type, idt_v, idt_index; 5320 5321 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); 5322 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); 5323 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); 5324 5325 exit_qualification = vmx_get_exit_qual(vcpu); 5326 5327 reason = (u32)exit_qualification >> 30; 5328 if (reason == TASK_SWITCH_GATE && idt_v) { 5329 switch (type) { 5330 case INTR_TYPE_NMI_INTR: 5331 vcpu->arch.nmi_injected = false; 5332 vmx_set_nmi_mask(vcpu, true); 5333 break; 5334 case INTR_TYPE_EXT_INTR: 5335 case INTR_TYPE_SOFT_INTR: 5336 kvm_clear_interrupt_queue(vcpu); 5337 break; 5338 case INTR_TYPE_HARD_EXCEPTION: 5339 if (vmx->idt_vectoring_info & 5340 VECTORING_INFO_DELIVER_CODE_MASK) { 5341 has_error_code = true; 5342 error_code = 5343 vmcs_read32(IDT_VECTORING_ERROR_CODE); 5344 } 5345 fallthrough; 5346 case INTR_TYPE_SOFT_EXCEPTION: 5347 kvm_clear_exception_queue(vcpu); 5348 break; 5349 default: 5350 break; 5351 } 5352 } 5353 tss_selector = exit_qualification; 5354 5355 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && 5356 type != INTR_TYPE_EXT_INTR && 
5357 type != INTR_TYPE_NMI_INTR)) 5358 WARN_ON(!skip_emulated_instruction(vcpu)); 5359 5360 /* 5361 * TODO: What about debug traps on tss switch? 5362 * Are we supposed to inject them and update dr6? 5363 */ 5364 return kvm_task_switch(vcpu, tss_selector, 5365 type == INTR_TYPE_SOFT_INTR ? idt_index : -1, 5366 reason, has_error_code, error_code); 5367 } 5368 5369 static int handle_ept_violation(struct kvm_vcpu *vcpu) 5370 { 5371 unsigned long exit_qualification; 5372 gpa_t gpa; 5373 u64 error_code; 5374 5375 exit_qualification = vmx_get_exit_qual(vcpu); 5376 5377 /* 5378 * EPT violation happened while executing iret from NMI, 5379 * "blocked by NMI" bit has to be set before next VM entry. 5380 * There are errata that may cause this bit to not be set: 5381 * AAK134, BY25. 5382 */ 5383 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 5384 enable_vnmi && 5385 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 5386 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); 5387 5388 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5389 trace_kvm_page_fault(gpa, exit_qualification); 5390 5391 /* Is it a read fault? */ 5392 error_code = (exit_qualification & EPT_VIOLATION_ACC_READ) 5393 ? PFERR_USER_MASK : 0; 5394 /* Is it a write fault? */ 5395 error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE) 5396 ? PFERR_WRITE_MASK : 0; 5397 /* Is it a fetch fault? */ 5398 error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR) 5399 ? PFERR_FETCH_MASK : 0; 5400 /* ept page table entry is present? */ 5401 error_code |= (exit_qualification & 5402 (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE | 5403 EPT_VIOLATION_EXECUTABLE)) 5404 ? PFERR_PRESENT_MASK : 0; 5405 5406 error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ? 5407 PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; 5408 5409 vcpu->arch.exit_qualification = exit_qualification; 5410 5411 /* 5412 * Check that the GPA doesn't exceed physical memory limits, as that is 5413 * a guest page fault. We have to emulate the instruction here, because 5414 * if the illegal address is that of a paging structure, then 5415 * EPT_VIOLATION_ACC_WRITE bit is set. Alternatively, if supported we 5416 * would also use advanced VM-exit information for EPT violations to 5417 * reconstruct the page fault error code. 5418 */ 5419 if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa))) 5420 return kvm_emulate_instruction(vcpu, 0); 5421 5422 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); 5423 } 5424 5425 static int handle_ept_misconfig(struct kvm_vcpu *vcpu) 5426 { 5427 gpa_t gpa; 5428 5429 if (!vmx_can_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0)) 5430 return 1; 5431 5432 /* 5433 * A nested guest cannot optimize MMIO vmexits, because we have an 5434 * nGPA here instead of the required GPA. 
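 * The fast path below (a zero-length kvm_io_bus_write() probe of the
 * FAST_MMIO bus) is therefore only attempted outside guest mode;
 * everything else goes through kvm_mmu_page_fault() with
 * PFERR_RSVD_MASK to trigger full MMIO emulation.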
5435 */ 5436 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5437 if (!is_guest_mode(vcpu) && 5438 !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { 5439 trace_kvm_fast_mmio(gpa); 5440 return kvm_skip_emulated_instruction(vcpu); 5441 } 5442 5443 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); 5444 } 5445 5446 static int handle_nmi_window(struct kvm_vcpu *vcpu) 5447 { 5448 if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm)) 5449 return -EIO; 5450 5451 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); 5452 ++vcpu->stat.nmi_window_exits; 5453 kvm_make_request(KVM_REQ_EVENT, vcpu); 5454 5455 return 1; 5456 } 5457 5458 static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu) 5459 { 5460 struct vcpu_vmx *vmx = to_vmx(vcpu); 5461 5462 return vmx->emulation_required && !vmx->rmode.vm86_active && 5463 vcpu->arch.exception.pending; 5464 } 5465 5466 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) 5467 { 5468 struct vcpu_vmx *vmx = to_vmx(vcpu); 5469 bool intr_window_requested; 5470 unsigned count = 130; 5471 5472 intr_window_requested = exec_controls_get(vmx) & 5473 CPU_BASED_INTR_WINDOW_EXITING; 5474 5475 while (vmx->emulation_required && count-- != 0) { 5476 if (intr_window_requested && !vmx_interrupt_blocked(vcpu)) 5477 return handle_interrupt_window(&vmx->vcpu); 5478 5479 if (kvm_test_request(KVM_REQ_EVENT, vcpu)) 5480 return 1; 5481 5482 if (!kvm_emulate_instruction(vcpu, 0)) 5483 return 0; 5484 5485 if (vmx_emulation_required_with_pending_exception(vcpu)) { 5486 kvm_prepare_emulation_failure_exit(vcpu); 5487 return 0; 5488 } 5489 5490 if (vcpu->arch.halt_request) { 5491 vcpu->arch.halt_request = 0; 5492 return kvm_emulate_halt_noskip(vcpu); 5493 } 5494 5495 /* 5496 * Note, return 1 and not 0, vcpu_run() will invoke 5497 * xfer_to_guest_mode() which will create a proper return 5498 * code. 5499 */ 5500 if (__xfer_to_guest_mode_work_pending()) 5501 return 1; 5502 } 5503 5504 return 1; 5505 } 5506 5507 static int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu) 5508 { 5509 if (vmx_emulation_required_with_pending_exception(vcpu)) { 5510 kvm_prepare_emulation_failure_exit(vcpu); 5511 return 0; 5512 } 5513 5514 return 1; 5515 } 5516 5517 static void grow_ple_window(struct kvm_vcpu *vcpu) 5518 { 5519 struct vcpu_vmx *vmx = to_vmx(vcpu); 5520 unsigned int old = vmx->ple_window; 5521 5522 vmx->ple_window = __grow_ple_window(old, ple_window, 5523 ple_window_grow, 5524 ple_window_max); 5525 5526 if (vmx->ple_window != old) { 5527 vmx->ple_window_dirty = true; 5528 trace_kvm_ple_window_update(vcpu->vcpu_id, 5529 vmx->ple_window, old); 5530 } 5531 } 5532 5533 static void shrink_ple_window(struct kvm_vcpu *vcpu) 5534 { 5535 struct vcpu_vmx *vmx = to_vmx(vcpu); 5536 unsigned int old = vmx->ple_window; 5537 5538 vmx->ple_window = __shrink_ple_window(old, ple_window, 5539 ple_window_shrink, 5540 ple_window); 5541 5542 if (vmx->ple_window != old) { 5543 vmx->ple_window_dirty = true; 5544 trace_kvm_ple_window_update(vcpu->vcpu_id, 5545 vmx->ple_window, old); 5546 } 5547 } 5548 5549 /* 5550 * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE 5551 * exiting, so only get here on cpu with PAUSE-Loop-Exiting. 5552 */ 5553 static int handle_pause(struct kvm_vcpu *vcpu) 5554 { 5555 if (!kvm_pause_in_guest(vcpu->kvm)) 5556 grow_ple_window(vcpu); 5557 5558 /* 5559 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting" 5560 * VM-execution control is ignored if CPL > 0. 
OTOH, KVM 5561 * never set PAUSE_EXITING and just set PLE if supported, 5562 * so the vcpu must be CPL=0 if it gets a PAUSE exit. 5563 */ 5564 kvm_vcpu_on_spin(vcpu, true); 5565 return kvm_skip_emulated_instruction(vcpu); 5566 } 5567 5568 static int handle_monitor_trap(struct kvm_vcpu *vcpu) 5569 { 5570 return 1; 5571 } 5572 5573 static int handle_invpcid(struct kvm_vcpu *vcpu) 5574 { 5575 u32 vmx_instruction_info; 5576 unsigned long type; 5577 gva_t gva; 5578 struct { 5579 u64 pcid; 5580 u64 gla; 5581 } operand; 5582 int gpr_index; 5583 5584 if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { 5585 kvm_queue_exception(vcpu, UD_VECTOR); 5586 return 1; 5587 } 5588 5589 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5590 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info); 5591 type = kvm_register_read(vcpu, gpr_index); 5592 5593 /* According to the Intel instruction reference, the memory operand 5594 * is read even if it isn't needed (e.g., for type==all) 5595 */ 5596 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5597 vmx_instruction_info, false, 5598 sizeof(operand), &gva)) 5599 return 1; 5600 5601 return kvm_handle_invpcid(vcpu, type, gva); 5602 } 5603 5604 static int handle_pml_full(struct kvm_vcpu *vcpu) 5605 { 5606 unsigned long exit_qualification; 5607 5608 trace_kvm_pml_full(vcpu->vcpu_id); 5609 5610 exit_qualification = vmx_get_exit_qual(vcpu); 5611 5612 /* 5613 * PML buffer FULL happened while executing iret from NMI, 5614 * "blocked by NMI" bit has to be set before next VM entry. 5615 */ 5616 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 5617 enable_vnmi && 5618 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 5619 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 5620 GUEST_INTR_STATE_NMI); 5621 5622 /* 5623 * PML buffer already flushed at beginning of VMEXIT. Nothing to do 5624 * here.., and there's no userspace involvement needed for PML. 5625 */ 5626 return 1; 5627 } 5628 5629 static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu) 5630 { 5631 struct vcpu_vmx *vmx = to_vmx(vcpu); 5632 5633 if (!vmx->req_immediate_exit && 5634 !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) { 5635 kvm_lapic_expired_hv_timer(vcpu); 5636 return EXIT_FASTPATH_REENTER_GUEST; 5637 } 5638 5639 return EXIT_FASTPATH_NONE; 5640 } 5641 5642 static int handle_preemption_timer(struct kvm_vcpu *vcpu) 5643 { 5644 handle_fastpath_preemption_timer(vcpu); 5645 return 1; 5646 } 5647 5648 /* 5649 * When nested=0, all VMX instruction VM Exits filter here. The handlers 5650 * are overwritten by nested_vmx_setup() when nested=1. 5651 */ 5652 static int handle_vmx_instruction(struct kvm_vcpu *vcpu) 5653 { 5654 kvm_queue_exception(vcpu, UD_VECTOR); 5655 return 1; 5656 } 5657 5658 #ifndef CONFIG_X86_SGX_KVM 5659 static int handle_encls(struct kvm_vcpu *vcpu) 5660 { 5661 /* 5662 * SGX virtualization is disabled. There is no software enable bit for 5663 * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent 5664 * the guest from executing ENCLS (when SGX is supported by hardware). 5665 */ 5666 kvm_queue_exception(vcpu, UD_VECTOR); 5667 return 1; 5668 } 5669 #endif /* CONFIG_X86_SGX_KVM */ 5670 5671 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu) 5672 { 5673 /* 5674 * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK 5675 * VM-Exits. Unconditionally set the flag here and leave the handling to 5676 * vmx_handle_exit(). 
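 * vmx_handle_exit() then always kicks out to userspace with
 * KVM_RUN_X86_BUS_LOCK set in vcpu->run->flags, and rewrites the exit
 * reason to KVM_EXIT_X86_BUS_LOCK when the underlying handler would
 * otherwise have resumed the guest.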
5677 */ 5678 to_vmx(vcpu)->exit_reason.bus_lock_detected = true; 5679 return 1; 5680 } 5681 5682 /* 5683 * The exit handlers return 1 if the exit was handled fully and guest execution 5684 * may resume. Otherwise they set the kvm_run parameter to indicate what needs 5685 * to be done to userspace and return 0. 5686 */ 5687 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { 5688 [EXIT_REASON_EXCEPTION_NMI] = handle_exception_nmi, 5689 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, 5690 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, 5691 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, 5692 [EXIT_REASON_IO_INSTRUCTION] = handle_io, 5693 [EXIT_REASON_CR_ACCESS] = handle_cr, 5694 [EXIT_REASON_DR_ACCESS] = handle_dr, 5695 [EXIT_REASON_CPUID] = kvm_emulate_cpuid, 5696 [EXIT_REASON_MSR_READ] = kvm_emulate_rdmsr, 5697 [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr, 5698 [EXIT_REASON_INTERRUPT_WINDOW] = handle_interrupt_window, 5699 [EXIT_REASON_HLT] = kvm_emulate_halt, 5700 [EXIT_REASON_INVD] = kvm_emulate_invd, 5701 [EXIT_REASON_INVLPG] = handle_invlpg, 5702 [EXIT_REASON_RDPMC] = kvm_emulate_rdpmc, 5703 [EXIT_REASON_VMCALL] = kvm_emulate_hypercall, 5704 [EXIT_REASON_VMCLEAR] = handle_vmx_instruction, 5705 [EXIT_REASON_VMLAUNCH] = handle_vmx_instruction, 5706 [EXIT_REASON_VMPTRLD] = handle_vmx_instruction, 5707 [EXIT_REASON_VMPTRST] = handle_vmx_instruction, 5708 [EXIT_REASON_VMREAD] = handle_vmx_instruction, 5709 [EXIT_REASON_VMRESUME] = handle_vmx_instruction, 5710 [EXIT_REASON_VMWRITE] = handle_vmx_instruction, 5711 [EXIT_REASON_VMOFF] = handle_vmx_instruction, 5712 [EXIT_REASON_VMON] = handle_vmx_instruction, 5713 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, 5714 [EXIT_REASON_APIC_ACCESS] = handle_apic_access, 5715 [EXIT_REASON_APIC_WRITE] = handle_apic_write, 5716 [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, 5717 [EXIT_REASON_WBINVD] = kvm_emulate_wbinvd, 5718 [EXIT_REASON_XSETBV] = kvm_emulate_xsetbv, 5719 [EXIT_REASON_TASK_SWITCH] = handle_task_switch, 5720 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, 5721 [EXIT_REASON_GDTR_IDTR] = handle_desc, 5722 [EXIT_REASON_LDTR_TR] = handle_desc, 5723 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, 5724 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, 5725 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, 5726 [EXIT_REASON_MWAIT_INSTRUCTION] = kvm_emulate_mwait, 5727 [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, 5728 [EXIT_REASON_MONITOR_INSTRUCTION] = kvm_emulate_monitor, 5729 [EXIT_REASON_INVEPT] = handle_vmx_instruction, 5730 [EXIT_REASON_INVVPID] = handle_vmx_instruction, 5731 [EXIT_REASON_RDRAND] = kvm_handle_invalid_op, 5732 [EXIT_REASON_RDSEED] = kvm_handle_invalid_op, 5733 [EXIT_REASON_PML_FULL] = handle_pml_full, 5734 [EXIT_REASON_INVPCID] = handle_invpcid, 5735 [EXIT_REASON_VMFUNC] = handle_vmx_instruction, 5736 [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, 5737 [EXIT_REASON_ENCLS] = handle_encls, 5738 [EXIT_REASON_BUS_LOCK] = handle_bus_lock_vmexit, 5739 }; 5740 5741 static const int kvm_vmx_max_exit_handlers = 5742 ARRAY_SIZE(kvm_vmx_exit_handlers); 5743 5744 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, 5745 u64 *info1, u64 *info2, 5746 u32 *intr_info, u32 *error_code) 5747 { 5748 struct vcpu_vmx *vmx = to_vmx(vcpu); 5749 5750 *reason = vmx->exit_reason.full; 5751 *info1 = vmx_get_exit_qual(vcpu); 5752 if (!(vmx->exit_reason.failed_vmentry)) { 5753 *info2 = vmx->idt_vectoring_info; 5754 *intr_info = vmx_get_intr_info(vcpu); 5755 
if (is_exception_with_error_code(*intr_info)) 5756 *error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 5757 else 5758 *error_code = 0; 5759 } else { 5760 *info2 = 0; 5761 *intr_info = 0; 5762 *error_code = 0; 5763 } 5764 } 5765 5766 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) 5767 { 5768 if (vmx->pml_pg) { 5769 __free_page(vmx->pml_pg); 5770 vmx->pml_pg = NULL; 5771 } 5772 } 5773 5774 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) 5775 { 5776 struct vcpu_vmx *vmx = to_vmx(vcpu); 5777 u64 *pml_buf; 5778 u16 pml_idx; 5779 5780 pml_idx = vmcs_read16(GUEST_PML_INDEX); 5781 5782 /* Do nothing if PML buffer is empty */ 5783 if (pml_idx == (PML_ENTITY_NUM - 1)) 5784 return; 5785 5786 /* PML index always points to next available PML buffer entity */ 5787 if (pml_idx >= PML_ENTITY_NUM) 5788 pml_idx = 0; 5789 else 5790 pml_idx++; 5791 5792 pml_buf = page_address(vmx->pml_pg); 5793 for (; pml_idx < PML_ENTITY_NUM; pml_idx++) { 5794 u64 gpa; 5795 5796 gpa = pml_buf[pml_idx]; 5797 WARN_ON(gpa & (PAGE_SIZE - 1)); 5798 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); 5799 } 5800 5801 /* reset PML index */ 5802 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 5803 } 5804 5805 static void vmx_dump_sel(char *name, uint32_t sel) 5806 { 5807 pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", 5808 name, vmcs_read16(sel), 5809 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), 5810 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), 5811 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); 5812 } 5813 5814 static void vmx_dump_dtsel(char *name, uint32_t limit) 5815 { 5816 pr_err("%s limit=0x%08x, base=0x%016lx\n", 5817 name, vmcs_read32(limit), 5818 vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); 5819 } 5820 5821 static void vmx_dump_msrs(char *name, struct vmx_msrs *m) 5822 { 5823 unsigned int i; 5824 struct vmx_msr_entry *e; 5825 5826 pr_err("MSR %s:\n", name); 5827 for (i = 0, e = m->val; i < m->nr; ++i, ++e) 5828 pr_err(" %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value); 5829 } 5830 5831 void dump_vmcs(struct kvm_vcpu *vcpu) 5832 { 5833 struct vcpu_vmx *vmx = to_vmx(vcpu); 5834 u32 vmentry_ctl, vmexit_ctl; 5835 u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control; 5836 unsigned long cr4; 5837 int efer_slot; 5838 5839 if (!dump_invalid_vmcs) { 5840 pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n"); 5841 return; 5842 } 5843 5844 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); 5845 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); 5846 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 5847 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); 5848 cr4 = vmcs_readl(GUEST_CR4); 5849 secondary_exec_control = 0; 5850 if (cpu_has_secondary_exec_ctrls()) 5851 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); 5852 5853 pr_err("VMCS %p, last attempted VM-entry on CPU %d\n", 5854 vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu); 5855 pr_err("*** Guest State ***\n"); 5856 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 5857 vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), 5858 vmcs_readl(CR0_GUEST_HOST_MASK)); 5859 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 5860 cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); 5861 pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); 5862 if (cpu_has_vmx_ept()) { 5863 pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", 5864 vmcs_read64(GUEST_PDPTR0), 
vmcs_read64(GUEST_PDPTR1)); 5865 pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", 5866 vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3)); 5867 } 5868 pr_err("RSP = 0x%016lx RIP = 0x%016lx\n", 5869 vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP)); 5870 pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", 5871 vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7)); 5872 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 5873 vmcs_readl(GUEST_SYSENTER_ESP), 5874 vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); 5875 vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); 5876 vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); 5877 vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); 5878 vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); 5879 vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); 5880 vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); 5881 vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); 5882 vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); 5883 vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); 5884 vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); 5885 efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER); 5886 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER) 5887 pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER)); 5888 else if (efer_slot >= 0) 5889 pr_err("EFER= 0x%016llx (autoload)\n", 5890 vmx->msr_autoload.guest.val[efer_slot].value); 5891 else if (vmentry_ctl & VM_ENTRY_IA32E_MODE) 5892 pr_err("EFER= 0x%016llx (effective)\n", 5893 vcpu->arch.efer | (EFER_LMA | EFER_LME)); 5894 else 5895 pr_err("EFER= 0x%016llx (effective)\n", 5896 vcpu->arch.efer & ~(EFER_LMA | EFER_LME)); 5897 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT) 5898 pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT)); 5899 pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", 5900 vmcs_read64(GUEST_IA32_DEBUGCTL), 5901 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); 5902 if (cpu_has_load_perf_global_ctrl() && 5903 vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) 5904 pr_err("PerfGlobCtl = 0x%016llx\n", 5905 vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); 5906 if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) 5907 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); 5908 pr_err("Interruptibility = %08x ActivityState = %08x\n", 5909 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), 5910 vmcs_read32(GUEST_ACTIVITY_STATE)); 5911 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 5912 pr_err("InterruptStatus = %04x\n", 5913 vmcs_read16(GUEST_INTR_STATUS)); 5914 if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0) 5915 vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest); 5916 if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0) 5917 vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest); 5918 5919 pr_err("*** Host State ***\n"); 5920 pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", 5921 vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); 5922 pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", 5923 vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), 5924 vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), 5925 vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), 5926 vmcs_read16(HOST_TR_SELECTOR)); 5927 pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", 5928 vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), 5929 vmcs_readl(HOST_TR_BASE)); 5930 pr_err("GDTBase=%016lx IDTBase=%016lx\n", 5931 vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); 5932 pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", 5933 vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), 5934 vmcs_readl(HOST_CR4)); 5935 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 5936 
vmcs_readl(HOST_IA32_SYSENTER_ESP), 5937 vmcs_read32(HOST_IA32_SYSENTER_CS), 5938 vmcs_readl(HOST_IA32_SYSENTER_EIP)); 5939 if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER) 5940 pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER)); 5941 if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT) 5942 pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT)); 5943 if (cpu_has_load_perf_global_ctrl() && 5944 vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 5945 pr_err("PerfGlobCtl = 0x%016llx\n", 5946 vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); 5947 if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0) 5948 vmx_dump_msrs("host autoload", &vmx->msr_autoload.host); 5949 5950 pr_err("*** Control State ***\n"); 5951 pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", 5952 pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); 5953 pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); 5954 pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", 5955 vmcs_read32(EXCEPTION_BITMAP), 5956 vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), 5957 vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); 5958 pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", 5959 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 5960 vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), 5961 vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); 5962 pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", 5963 vmcs_read32(VM_EXIT_INTR_INFO), 5964 vmcs_read32(VM_EXIT_INTR_ERROR_CODE), 5965 vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); 5966 pr_err(" reason=%08x qualification=%016lx\n", 5967 vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION)); 5968 pr_err("IDTVectoring: info=%08x errcode=%08x\n", 5969 vmcs_read32(IDT_VECTORING_INFO_FIELD), 5970 vmcs_read32(IDT_VECTORING_ERROR_CODE)); 5971 pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET)); 5972 if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING) 5973 pr_err("TSC Multiplier = 0x%016llx\n", 5974 vmcs_read64(TSC_MULTIPLIER)); 5975 if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) { 5976 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) { 5977 u16 status = vmcs_read16(GUEST_INTR_STATUS); 5978 pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff); 5979 } 5980 pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD)); 5981 if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) 5982 pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR)); 5983 pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR)); 5984 } 5985 if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR) 5986 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV)); 5987 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)) 5988 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER)); 5989 if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) 5990 pr_err("PLE Gap=%08x Window=%08x\n", 5991 vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW)); 5992 if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID) 5993 pr_err("Virtual processor ID = 0x%04x\n", 5994 vmcs_read16(VIRTUAL_PROCESSOR_ID)); 5995 } 5996 5997 /* 5998 * The guest has exited. See if we can fix it or if we need userspace 5999 * assistance. 
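 * Returns 1 if the exit was handled and the guest can be resumed, 0 if
 * vcpu->run has been filled in and control must go back to userspace,
 * or a negative errno (e.g. -EIO from KVM_BUG_ON()) on an internal
 * error.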
6000 */ 6001 static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) 6002 { 6003 struct vcpu_vmx *vmx = to_vmx(vcpu); 6004 union vmx_exit_reason exit_reason = vmx->exit_reason; 6005 u32 vectoring_info = vmx->idt_vectoring_info; 6006 u16 exit_handler_index; 6007 6008 /* 6009 * Flush the PML buffer of logged GPAs so that dirty_bitmap stays up to 6010 * date. A further benefit: in kvm_vm_ioctl_get_dirty_log, before 6011 * querying dirty_bitmap, it is enough to kick all vcpus out of guest 6012 * mode, because once a vcpu is back in root mode its PML buffer has 6013 * already been flushed. Note, PML is never enabled in hardware while 6014 * running L2. 6015 */ 6016 if (enable_pml && !is_guest_mode(vcpu)) 6017 vmx_flush_pml_buffer(vcpu); 6018 6019 /* 6020 * KVM should never reach this point with a pending nested VM-Enter. 6021 * More specifically, short-circuiting VM-Entry to emulate L2 due to 6022 * invalid guest state should never happen as that means KVM knowingly 6023 * allowed a nested VM-Enter with an invalid vmcs12. More below. 6024 */ 6025 if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm)) 6026 return -EIO; 6027 6028 if (is_guest_mode(vcpu)) { 6029 /* 6030 * PML is never enabled when running L2, bail immediately if a 6031 * PML full exit occurs as something is horribly wrong. 6032 */ 6033 if (exit_reason.basic == EXIT_REASON_PML_FULL) 6034 goto unexpected_vmexit; 6035 6036 /* 6037 * The host physical addresses of some pages of guest memory 6038 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC 6039 * Page). The CPU may write to these pages via their host 6040 * physical address while L2 is running, bypassing any 6041 * address-translation-based dirty tracking (e.g. EPT write 6042 * protection). 6043 * 6044 * Mark them dirty on every exit from L2 to prevent them from 6045 * getting out of sync with dirty tracking. 6046 */ 6047 nested_mark_vmcs12_pages_dirty(vcpu); 6048 6049 /* 6050 * Synthesize a triple fault if L2 state is invalid. In normal 6051 * operation, nested VM-Enter rejects any attempt to enter L2 6052 * with invalid state. However, those checks are skipped if 6053 * state is being stuffed via RSM or KVM_SET_NESTED_STATE. If 6054 * L2 state is invalid, it means either L1 modified SMRAM state 6055 * or userspace provided bad state. Synthesize TRIPLE_FAULT as 6056 * doing so is architecturally allowed in the RSM case, and is 6057 * the least awful solution for the userspace case without 6058 * risking false positives. 6059 */ 6060 if (vmx->emulation_required) { 6061 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); 6062 return 1; 6063 } 6064 6065 if (nested_vmx_reflect_vmexit(vcpu)) 6066 return 1; 6067 } 6068 6069 /* If guest state is invalid, start emulating. L2 is handled above. */ 6070 if (vmx->emulation_required) 6071 return handle_invalid_guest_state(vcpu); 6072 6073 if (exit_reason.failed_vmentry) { 6074 dump_vmcs(vcpu); 6075 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; 6076 vcpu->run->fail_entry.hardware_entry_failure_reason 6077 = exit_reason.full; 6078 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; 6079 return 0; 6080 } 6081 6082 if (unlikely(vmx->fail)) { 6083 dump_vmcs(vcpu); 6084 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; 6085 vcpu->run->fail_entry.hardware_entry_failure_reason 6086 = vmcs_read32(VM_INSTRUCTION_ERROR); 6087 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; 6088 return 0; 6089 } 6090 6091 /* 6092 * Note: 6093 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by 6094 * event delivery, since it indicates the guest is accessing MMIO. 6095 * The VM-exit would be triggered again after returning to the guest, 6096 * which would cause an infinite loop. 6097 */ 6098 if ((vectoring_info & VECTORING_INFO_VALID_MASK) && 6099 (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI && 6100 exit_reason.basic != EXIT_REASON_EPT_VIOLATION && 6101 exit_reason.basic != EXIT_REASON_PML_FULL && 6102 exit_reason.basic != EXIT_REASON_APIC_ACCESS && 6103 exit_reason.basic != EXIT_REASON_TASK_SWITCH)) { 6104 int ndata = 3; 6105 6106 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 6107 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; 6108 vcpu->run->internal.data[0] = vectoring_info; 6109 vcpu->run->internal.data[1] = exit_reason.full; 6110 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification; 6111 if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) { 6112 vcpu->run->internal.data[ndata++] = 6113 vmcs_read64(GUEST_PHYSICAL_ADDRESS); 6114 } 6115 vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu; 6116 vcpu->run->internal.ndata = ndata; 6117 return 0; 6118 } 6119 6120 if (unlikely(!enable_vnmi && 6121 vmx->loaded_vmcs->soft_vnmi_blocked)) { 6122 if (!vmx_interrupt_blocked(vcpu)) { 6123 vmx->loaded_vmcs->soft_vnmi_blocked = 0; 6124 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && 6125 vcpu->arch.nmi_pending) { 6126 /* 6127 * This CPU doesn't help us find the end of an 6128 * NMI-blocked window if the guest runs with IRQs 6129 * disabled. So we pull the trigger after 1 s of 6130 * futile waiting, but inform the user about it.
6131 */ 6132 printk(KERN_WARNING "%s: Breaking out of NMI-blocked " 6133 "state on VCPU %d after 1 s timeout\n", 6134 __func__, vcpu->vcpu_id); 6135 vmx->loaded_vmcs->soft_vnmi_blocked = 0; 6136 } 6137 } 6138 6139 if (exit_fastpath != EXIT_FASTPATH_NONE) 6140 return 1; 6141 6142 if (exit_reason.basic >= kvm_vmx_max_exit_handlers) 6143 goto unexpected_vmexit; 6144 #ifdef CONFIG_RETPOLINE 6145 if (exit_reason.basic == EXIT_REASON_MSR_WRITE) 6146 return kvm_emulate_wrmsr(vcpu); 6147 else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER) 6148 return handle_preemption_timer(vcpu); 6149 else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW) 6150 return handle_interrupt_window(vcpu); 6151 else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) 6152 return handle_external_interrupt(vcpu); 6153 else if (exit_reason.basic == EXIT_REASON_HLT) 6154 return kvm_emulate_halt(vcpu); 6155 else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) 6156 return handle_ept_misconfig(vcpu); 6157 #endif 6158 6159 exit_handler_index = array_index_nospec((u16)exit_reason.basic, 6160 kvm_vmx_max_exit_handlers); 6161 if (!kvm_vmx_exit_handlers[exit_handler_index]) 6162 goto unexpected_vmexit; 6163 6164 return kvm_vmx_exit_handlers[exit_handler_index](vcpu); 6165 6166 unexpected_vmexit: 6167 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", 6168 exit_reason.full); 6169 dump_vmcs(vcpu); 6170 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 6171 vcpu->run->internal.suberror = 6172 KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; 6173 vcpu->run->internal.ndata = 2; 6174 vcpu->run->internal.data[0] = exit_reason.full; 6175 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; 6176 return 0; 6177 } 6178 6179 static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) 6180 { 6181 int ret = __vmx_handle_exit(vcpu, exit_fastpath); 6182 6183 /* 6184 * Exit to user space when bus lock detected to inform that there is 6185 * a bus lock in guest. 6186 */ 6187 if (to_vmx(vcpu)->exit_reason.bus_lock_detected) { 6188 if (ret > 0) 6189 vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; 6190 6191 vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; 6192 return 0; 6193 } 6194 return ret; 6195 } 6196 6197 /* 6198 * Software based L1D cache flush which is used when microcode providing 6199 * the cache control MSR is not loaded. 6200 * 6201 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to 6202 * flush it is required to read in 64 KiB because the replacement algorithm 6203 * is not exactly LRU. This could be sized at runtime via topology 6204 * information but as all relevant affected CPUs have 32KiB L1D cache size 6205 * there is no point in doing so. 6206 */ 6207 static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu) 6208 { 6209 int size = PAGE_SIZE << L1D_CACHE_ORDER; 6210 6211 /* 6212 * This code is only executed when the the flush mode is 'cond' or 6213 * 'always' 6214 */ 6215 if (static_branch_likely(&vmx_l1d_flush_cond)) { 6216 bool flush_l1d; 6217 6218 /* 6219 * Clear the per-vcpu flush bit, it gets set again 6220 * either from vcpu_run() or from one of the unsafe 6221 * VMEXIT handlers. 6222 */ 6223 flush_l1d = vcpu->arch.l1tf_flush_l1d; 6224 vcpu->arch.l1tf_flush_l1d = false; 6225 6226 /* 6227 * Clear the per-cpu flush bit, it gets set again from 6228 * the interrupt handlers. 
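 * I.e. the flush below is performed only if at least one of the two
 * bits was set since the last VM-entry; in the 'always' mode the
 * vmx_l1d_flush_cond static branch is disabled, this block is skipped
 * and the flush happens unconditionally.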
6229 */ 6230 flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); 6231 kvm_clear_cpu_l1tf_flush_l1d(); 6232 6233 if (!flush_l1d) 6234 return; 6235 } 6236 6237 vcpu->stat.l1d_flush++; 6238 6239 if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { 6240 native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); 6241 return; 6242 } 6243 6244 asm volatile( 6245 /* First ensure the pages are in the TLB */ 6246 "xorl %%eax, %%eax\n" 6247 ".Lpopulate_tlb:\n\t" 6248 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" 6249 "addl $4096, %%eax\n\t" 6250 "cmpl %%eax, %[size]\n\t" 6251 "jne .Lpopulate_tlb\n\t" 6252 "xorl %%eax, %%eax\n\t" 6253 "cpuid\n\t" 6254 /* Now fill the cache */ 6255 "xorl %%eax, %%eax\n" 6256 ".Lfill_cache:\n" 6257 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" 6258 "addl $64, %%eax\n\t" 6259 "cmpl %%eax, %[size]\n\t" 6260 "jne .Lfill_cache\n\t" 6261 "lfence\n" 6262 :: [flush_pages] "r" (vmx_l1d_flush_pages), 6263 [size] "r" (size) 6264 : "eax", "ebx", "ecx", "edx"); 6265 } 6266 6267 static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) 6268 { 6269 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6270 int tpr_threshold; 6271 6272 if (is_guest_mode(vcpu) && 6273 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) 6274 return; 6275 6276 tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr; 6277 if (is_guest_mode(vcpu)) 6278 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold; 6279 else 6280 vmcs_write32(TPR_THRESHOLD, tpr_threshold); 6281 } 6282 6283 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) 6284 { 6285 struct vcpu_vmx *vmx = to_vmx(vcpu); 6286 u32 sec_exec_control; 6287 6288 if (!lapic_in_kernel(vcpu)) 6289 return; 6290 6291 if (!flexpriority_enabled && 6292 !cpu_has_vmx_virtualize_x2apic_mode()) 6293 return; 6294 6295 /* Postpone execution until vmcs01 is the current VMCS. */ 6296 if (is_guest_mode(vcpu)) { 6297 vmx->nested.change_vmcs01_virtual_apic_mode = true; 6298 return; 6299 } 6300 6301 sec_exec_control = secondary_exec_controls_get(vmx); 6302 sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 6303 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); 6304 6305 switch (kvm_get_apic_mode(vcpu)) { 6306 case LAPIC_MODE_INVALID: 6307 WARN_ONCE(true, "Invalid local APIC state"); 6308 break; 6309 case LAPIC_MODE_DISABLED: 6310 break; 6311 case LAPIC_MODE_XAPIC: 6312 if (flexpriority_enabled) { 6313 sec_exec_control |= 6314 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 6315 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 6316 6317 /* 6318 * Flush the TLB, reloading the APIC access page will 6319 * only do so if its physical address has changed, but 6320 * the guest may have inserted a non-APIC mapping into 6321 * the TLB while the APIC access page was disabled. 6322 */ 6323 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 6324 } 6325 break; 6326 case LAPIC_MODE_X2APIC: 6327 if (cpu_has_vmx_virtualize_x2apic_mode()) 6328 sec_exec_control |= 6329 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 6330 break; 6331 } 6332 secondary_exec_controls_set(vmx, sec_exec_control); 6333 6334 vmx_update_msr_bitmap_x2apic(vcpu); 6335 } 6336 6337 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu) 6338 { 6339 struct page *page; 6340 6341 /* Defer reload until vmcs01 is the current VMCS. 
*/ 6342 if (is_guest_mode(vcpu)) { 6343 to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true; 6344 return; 6345 } 6346 6347 if (!(secondary_exec_controls_get(to_vmx(vcpu)) & 6348 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 6349 return; 6350 6351 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 6352 if (is_error_page(page)) 6353 return; 6354 6355 vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(page)); 6356 vmx_flush_tlb_current(vcpu); 6357 6358 /* 6359 * Do not pin apic access page in memory, the MMU notifier 6360 * will call us again if it is migrated or swapped out. 6361 */ 6362 put_page(page); 6363 } 6364 6365 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) 6366 { 6367 u16 status; 6368 u8 old; 6369 6370 if (max_isr == -1) 6371 max_isr = 0; 6372 6373 status = vmcs_read16(GUEST_INTR_STATUS); 6374 old = status >> 8; 6375 if (max_isr != old) { 6376 status &= 0xff; 6377 status |= max_isr << 8; 6378 vmcs_write16(GUEST_INTR_STATUS, status); 6379 } 6380 } 6381 6382 static void vmx_set_rvi(int vector) 6383 { 6384 u16 status; 6385 u8 old; 6386 6387 if (vector == -1) 6388 vector = 0; 6389 6390 status = vmcs_read16(GUEST_INTR_STATUS); 6391 old = (u8)status & 0xff; 6392 if ((u8)vector != old) { 6393 status &= ~0xff; 6394 status |= (u8)vector; 6395 vmcs_write16(GUEST_INTR_STATUS, status); 6396 } 6397 } 6398 6399 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) 6400 { 6401 /* 6402 * When running L2, updating RVI is only relevant when 6403 * vmcs12 virtual-interrupt-delivery enabled. 6404 * However, it can be enabled only when L1 also 6405 * intercepts external-interrupts and in that case 6406 * we should not update vmcs02 RVI but instead intercept 6407 * interrupt. Therefore, do nothing when running L2. 6408 */ 6409 if (!is_guest_mode(vcpu)) 6410 vmx_set_rvi(max_irr); 6411 } 6412 6413 static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) 6414 { 6415 struct vcpu_vmx *vmx = to_vmx(vcpu); 6416 int max_irr; 6417 bool got_posted_interrupt; 6418 6419 if (KVM_BUG_ON(!enable_apicv, vcpu->kvm)) 6420 return -EIO; 6421 6422 if (pi_test_on(&vmx->pi_desc)) { 6423 pi_clear_on(&vmx->pi_desc); 6424 /* 6425 * IOMMU can write to PID.ON, so the barrier matters even on UP. 6426 * But on x86 this is just a compiler barrier anyway. 6427 */ 6428 smp_mb__after_atomic(); 6429 got_posted_interrupt = 6430 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); 6431 } else { 6432 max_irr = kvm_lapic_find_highest_irr(vcpu); 6433 got_posted_interrupt = false; 6434 } 6435 6436 /* 6437 * Newly recognized interrupts are injected via either virtual interrupt 6438 * delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is 6439 * disabled in two cases: 6440 * 6441 * 1) If L2 is running and the vCPU has a new pending interrupt. If L1 6442 * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a 6443 * VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected 6444 * into L2, but KVM doesn't use virtual interrupt delivery to inject 6445 * interrupts into L2, and so KVM_REQ_EVENT is again needed. 6446 * 6447 * 2) If APICv is disabled for this vCPU, assigned devices may still 6448 * attempt to post interrupts. The posted interrupt vector will cause 6449 * a VM-Exit and the subsequent entry will call sync_pir_to_irr. 
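 * In short: RVI is programmed below only when L1 is running and APICv
 * is active for this vCPU; in every other case a freshly posted
 * interrupt is turned into a KVM_REQ_EVENT so that it is re-evaluated
 * (and injected if appropriate) before the next VM-entry.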
6450 */ 6451 if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu)) 6452 vmx_set_rvi(max_irr); 6453 else if (got_posted_interrupt) 6454 kvm_make_request(KVM_REQ_EVENT, vcpu); 6455 6456 return max_irr; 6457 } 6458 6459 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) 6460 { 6461 if (!kvm_vcpu_apicv_active(vcpu)) 6462 return; 6463 6464 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); 6465 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); 6466 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); 6467 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); 6468 } 6469 6470 static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) 6471 { 6472 struct vcpu_vmx *vmx = to_vmx(vcpu); 6473 6474 pi_clear_on(&vmx->pi_desc); 6475 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); 6476 } 6477 6478 void vmx_do_interrupt_nmi_irqoff(unsigned long entry); 6479 6480 static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, 6481 unsigned long entry) 6482 { 6483 bool is_nmi = entry == (unsigned long)asm_exc_nmi_noist; 6484 6485 kvm_before_interrupt(vcpu, is_nmi ? KVM_HANDLING_NMI : KVM_HANDLING_IRQ); 6486 vmx_do_interrupt_nmi_irqoff(entry); 6487 kvm_after_interrupt(vcpu); 6488 } 6489 6490 static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu) 6491 { 6492 /* 6493 * Save xfd_err to guest_fpu before interrupt is enabled, so the 6494 * MSR value is not clobbered by the host activity before the guest 6495 * has chance to consume it. 6496 * 6497 * Do not blindly read xfd_err here, since this exception might 6498 * be caused by L1 interception on a platform which doesn't 6499 * support xfd at all. 6500 * 6501 * Do it conditionally upon guest_fpu::xfd. xfd_err matters 6502 * only when xfd contains a non-zero value. 6503 * 6504 * Queuing exception is done in vmx_handle_exit. See comment there. 
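 * For example, if the guest disarms AMX via XFD (bit 18 set) and then
 * touches tile state, the CPU raises #NM and latches bit 18 in
 * MSR_IA32_XFD_ERR; that value is captured below, with IRQs still off,
 * before host activity can overwrite it.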
6505 */ 6506 if (vcpu->arch.guest_fpu.fpstate->xfd) 6507 rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); 6508 } 6509 6510 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx) 6511 { 6512 const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist; 6513 u32 intr_info = vmx_get_intr_info(&vmx->vcpu); 6514 6515 /* if exit due to PF check for async PF */ 6516 if (is_page_fault(intr_info)) 6517 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags(); 6518 /* if exit due to NM, handle before interrupts are enabled */ 6519 else if (is_nm_fault(intr_info)) 6520 handle_nm_fault_irqoff(&vmx->vcpu); 6521 /* Handle machine checks before interrupts are enabled */ 6522 else if (is_machine_check(intr_info)) 6523 kvm_machine_check(); 6524 /* We need to handle NMIs before interrupts are enabled */ 6525 else if (is_nmi(intr_info)) 6526 handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry); 6527 } 6528 6529 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu) 6530 { 6531 u32 intr_info = vmx_get_intr_info(vcpu); 6532 unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK; 6533 gate_desc *desc = (gate_desc *)host_idt_base + vector; 6534 6535 if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm, 6536 "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info)) 6537 return; 6538 6539 handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc)); 6540 } 6541 6542 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu) 6543 { 6544 struct vcpu_vmx *vmx = to_vmx(vcpu); 6545 6546 if (vmx->emulation_required) 6547 return; 6548 6549 if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) 6550 handle_external_interrupt_irqoff(vcpu); 6551 else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI) 6552 handle_exception_nmi_irqoff(vmx); 6553 } 6554 6555 /* 6556 * The kvm parameter can be NULL (module initialization, or invocation before 6557 * VM creation). Be sure to check the kvm parameter before using it. 6558 */ 6559 static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index) 6560 { 6561 switch (index) { 6562 case MSR_IA32_SMBASE: 6563 /* 6564 * We cannot do SMM unless we can run the guest in big 6565 * real mode. 6566 */ 6567 return enable_unrestricted_guest || emulate_invalid_guest_state; 6568 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 6569 return nested; 6570 case MSR_AMD64_VIRT_SPEC_CTRL: 6571 case MSR_AMD64_TSC_RATIO: 6572 /* This is AMD only. */ 6573 return false; 6574 default: 6575 return true; 6576 } 6577 } 6578 6579 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) 6580 { 6581 u32 exit_intr_info; 6582 bool unblock_nmi; 6583 u8 vector; 6584 bool idtv_info_valid; 6585 6586 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; 6587 6588 if (enable_vnmi) { 6589 if (vmx->loaded_vmcs->nmi_known_unmasked) 6590 return; 6591 6592 exit_intr_info = vmx_get_intr_info(&vmx->vcpu); 6593 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; 6594 vector = exit_intr_info & INTR_INFO_VECTOR_MASK; 6595 /* 6596 * SDM 3: 27.7.1.2 (September 2008) 6597 * Re-set bit "block by NMI" before VM entry if vmexit caused by 6598 * a guest IRET fault. 6599 * SDM 3: 23.2.2 (September 2008) 6600 * Bit 12 is undefined in any of the following cases: 6601 * If the VM exit sets the valid bit in the IDT-vectoring 6602 * information field. 6603 * If the VM exit is due to a double fault. 
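 * IOW, re-set "blocked by NMI" below only when the exit interruption
 * info is valid, bit 12 (NMI unblocking due to IRET) is set, the vector
 * is not #DF and no event was being vectored; otherwise just refresh
 * nmi_known_unmasked from the interruptibility field.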
6604 */ 6605 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && 6606 vector != DF_VECTOR && !idtv_info_valid) 6607 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 6608 GUEST_INTR_STATE_NMI); 6609 else 6610 vmx->loaded_vmcs->nmi_known_unmasked = 6611 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) 6612 & GUEST_INTR_STATE_NMI); 6613 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) 6614 vmx->loaded_vmcs->vnmi_blocked_time += 6615 ktime_to_ns(ktime_sub(ktime_get(), 6616 vmx->loaded_vmcs->entry_time)); 6617 } 6618 6619 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, 6620 u32 idt_vectoring_info, 6621 int instr_len_field, 6622 int error_code_field) 6623 { 6624 u8 vector; 6625 int type; 6626 bool idtv_info_valid; 6627 6628 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; 6629 6630 vcpu->arch.nmi_injected = false; 6631 kvm_clear_exception_queue(vcpu); 6632 kvm_clear_interrupt_queue(vcpu); 6633 6634 if (!idtv_info_valid) 6635 return; 6636 6637 kvm_make_request(KVM_REQ_EVENT, vcpu); 6638 6639 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; 6640 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; 6641 6642 switch (type) { 6643 case INTR_TYPE_NMI_INTR: 6644 vcpu->arch.nmi_injected = true; 6645 /* 6646 * SDM 3: 27.7.1.2 (September 2008) 6647 * Clear bit "block by NMI" before VM entry if a NMI 6648 * delivery faulted. 6649 */ 6650 vmx_set_nmi_mask(vcpu, false); 6651 break; 6652 case INTR_TYPE_SOFT_EXCEPTION: 6653 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 6654 fallthrough; 6655 case INTR_TYPE_HARD_EXCEPTION: 6656 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { 6657 u32 err = vmcs_read32(error_code_field); 6658 kvm_requeue_exception_e(vcpu, vector, err); 6659 } else 6660 kvm_requeue_exception(vcpu, vector); 6661 break; 6662 case INTR_TYPE_SOFT_INTR: 6663 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 6664 fallthrough; 6665 case INTR_TYPE_EXT_INTR: 6666 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); 6667 break; 6668 default: 6669 break; 6670 } 6671 } 6672 6673 static void vmx_complete_interrupts(struct vcpu_vmx *vmx) 6674 { 6675 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, 6676 VM_EXIT_INSTRUCTION_LEN, 6677 IDT_VECTORING_ERROR_CODE); 6678 } 6679 6680 static void vmx_cancel_injection(struct kvm_vcpu *vcpu) 6681 { 6682 __vmx_complete_interrupts(vcpu, 6683 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 6684 VM_ENTRY_INSTRUCTION_LEN, 6685 VM_ENTRY_EXCEPTION_ERROR_CODE); 6686 6687 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 6688 } 6689 6690 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) 6691 { 6692 int i, nr_msrs; 6693 struct perf_guest_switch_msr *msrs; 6694 6695 /* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. 
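 * The loop below keeps the VMCS MSR auto-switch lists minimal: entries
 * whose guest and host values already match are removed, everything
 * else is (re)added with its guest/host pair so the CPU swaps it on
 * VM-entry and VM-exit.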
*/ 6696 msrs = perf_guest_get_msrs(&nr_msrs); 6697 if (!msrs) 6698 return; 6699 6700 for (i = 0; i < nr_msrs; i++) 6701 if (msrs[i].host == msrs[i].guest) 6702 clear_atomic_switch_msr(vmx, msrs[i].msr); 6703 else 6704 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, 6705 msrs[i].host, false); 6706 } 6707 6708 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) 6709 { 6710 struct vcpu_vmx *vmx = to_vmx(vcpu); 6711 u64 tscl; 6712 u32 delta_tsc; 6713 6714 if (vmx->req_immediate_exit) { 6715 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0); 6716 vmx->loaded_vmcs->hv_timer_soft_disabled = false; 6717 } else if (vmx->hv_deadline_tsc != -1) { 6718 tscl = rdtsc(); 6719 if (vmx->hv_deadline_tsc > tscl) 6720 /* set_hv_timer ensures the delta fits in 32-bits */ 6721 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> 6722 cpu_preemption_timer_multi); 6723 else 6724 delta_tsc = 0; 6725 6726 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); 6727 vmx->loaded_vmcs->hv_timer_soft_disabled = false; 6728 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) { 6729 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1); 6730 vmx->loaded_vmcs->hv_timer_soft_disabled = true; 6731 } 6732 } 6733 6734 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) 6735 { 6736 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) { 6737 vmx->loaded_vmcs->host_state.rsp = host_rsp; 6738 vmcs_writel(HOST_RSP, host_rsp); 6739 } 6740 } 6741 6742 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) 6743 { 6744 switch (to_vmx(vcpu)->exit_reason.basic) { 6745 case EXIT_REASON_MSR_WRITE: 6746 return handle_fastpath_set_msr_irqoff(vcpu); 6747 case EXIT_REASON_PREEMPTION_TIMER: 6748 return handle_fastpath_preemption_timer(vcpu); 6749 default: 6750 return EXIT_FASTPATH_NONE; 6751 } 6752 } 6753 6754 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, 6755 struct vcpu_vmx *vmx) 6756 { 6757 kvm_guest_enter_irqoff(); 6758 6759 /* L1D Flush includes CPU buffer clear to mitigate MDS */ 6760 if (static_branch_unlikely(&vmx_l1d_should_flush)) 6761 vmx_l1d_flush(vcpu); 6762 else if (static_branch_unlikely(&mds_user_clear)) 6763 mds_clear_cpu_buffers(); 6764 6765 if (vcpu->arch.cr2 != native_read_cr2()) 6766 native_write_cr2(vcpu->arch.cr2); 6767 6768 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 6769 vmx->loaded_vmcs->launched); 6770 6771 vcpu->arch.cr2 = native_read_cr2(); 6772 6773 kvm_guest_exit_irqoff(); 6774 } 6775 6776 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) 6777 { 6778 struct vcpu_vmx *vmx = to_vmx(vcpu); 6779 unsigned long cr4; 6780 6781 /* Record the guest's net vcpu time for enforced NMI injections. */ 6782 if (unlikely(!enable_vnmi && 6783 vmx->loaded_vmcs->soft_vnmi_blocked)) 6784 vmx->loaded_vmcs->entry_time = ktime_get(); 6785 6786 /* 6787 * Don't enter VMX if guest state is invalid, let the exit handler 6788 * start emulation until we arrive back to a valid state. Synthesize a 6789 * consistency check VM-Exit due to invalid guest state and bail. 
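 * The synthesized exit mimics a hardware VM-entry failure: exit_reason
 * is set to EXIT_REASON_INVALID_STATE with failed_vmentry = 1 and the
 * exit qualification to ENTRY_FAIL_DEFAULT, so the exit path either
 * emulates (L1) or synthesizes a triple fault (L2) instead of entering
 * the guest with bad state.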
6790 */ 6791 if (unlikely(vmx->emulation_required)) { 6792 vmx->fail = 0; 6793 6794 vmx->exit_reason.full = EXIT_REASON_INVALID_STATE; 6795 vmx->exit_reason.failed_vmentry = 1; 6796 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1); 6797 vmx->exit_qualification = ENTRY_FAIL_DEFAULT; 6798 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2); 6799 vmx->exit_intr_info = 0; 6800 return EXIT_FASTPATH_NONE; 6801 } 6802 6803 trace_kvm_entry(vcpu); 6804 6805 if (vmx->ple_window_dirty) { 6806 vmx->ple_window_dirty = false; 6807 vmcs_write32(PLE_WINDOW, vmx->ple_window); 6808 } 6809 6810 /* 6811 * We did this in prepare_switch_to_guest, because it needs to 6812 * be within srcu_read_lock. 6813 */ 6814 WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync); 6815 6816 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP)) 6817 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); 6818 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP)) 6819 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); 6820 vcpu->arch.regs_dirty = 0; 6821 6822 cr4 = cr4_read_shadow(); 6823 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 6824 vmcs_writel(HOST_CR4, cr4); 6825 vmx->loaded_vmcs->host_state.cr4 = cr4; 6826 } 6827 6828 /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */ 6829 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) 6830 set_debugreg(vcpu->arch.dr6, 6); 6831 6832 /* When single-stepping over STI and MOV SS, we must clear the 6833 * corresponding interruptibility bits in the guest state. Otherwise 6834 * vmentry fails as it then expects bit 14 (BS) in pending debug 6835 * exceptions being set, but that's not correct for the guest debugging 6836 * case. */ 6837 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 6838 vmx_set_interrupt_shadow(vcpu, 0); 6839 6840 kvm_load_guest_xsave_state(vcpu); 6841 6842 pt_guest_enter(vmx); 6843 6844 atomic_switch_perf_msrs(vmx); 6845 if (intel_pmu_lbr_is_enabled(vcpu)) 6846 vmx_passthrough_lbr_msrs(vcpu); 6847 6848 if (enable_preemption_timer) 6849 vmx_update_hv_timer(vcpu); 6850 6851 kvm_wait_lapic_expire(vcpu); 6852 6853 /* 6854 * If this vCPU has touched SPEC_CTRL, restore the guest's value if 6855 * it's non-zero. Since vmentry is serialising on affected CPUs, there 6856 * is no need to worry about the conditional branch over the wrmsr 6857 * being speculatively taken. 6858 */ 6859 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); 6860 6861 /* The actual VMENTER/EXIT is in the .noinstr.text section. */ 6862 vmx_vcpu_enter_exit(vcpu, vmx); 6863 6864 /* 6865 * We do not use IBRS in the kernel. If this vCPU has used the 6866 * SPEC_CTRL MSR it may have left it on; save the value and 6867 * turn it off. This is much more efficient than blindly adding 6868 * it to the atomic save/restore list. Especially as the former 6869 * (Saving guest MSRs on vmexit) doesn't even exist in KVM. 6870 * 6871 * For non-nested case: 6872 * If the L01 MSR bitmap does not intercept the MSR, then we need to 6873 * save it. 6874 * 6875 * For nested case: 6876 * If the L02 MSR bitmap does not intercept the MSR, then we need to 6877 * save it. 
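 * Conversely, if writes to SPEC_CTRL are still intercepted, the cached
 * vmx->spec_ctrl was updated by the WRMSR intercept and is already
 * current, so the RDMSR below can be skipped.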
6878 */ 6879 if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))) 6880 vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); 6881 6882 x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); 6883 6884 /* All fields are clean at this point */ 6885 if (static_branch_unlikely(&enable_evmcs)) { 6886 current_evmcs->hv_clean_fields |= 6887 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 6888 6889 current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu); 6890 } 6891 6892 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ 6893 if (vmx->host_debugctlmsr) 6894 update_debugctlmsr(vmx->host_debugctlmsr); 6895 6896 #ifndef CONFIG_X86_64 6897 /* 6898 * The sysexit path does not restore ds/es, so we must set them to 6899 * a reasonable value ourselves. 6900 * 6901 * We can't defer this to vmx_prepare_switch_to_host() since that 6902 * function may be executed in interrupt context, which saves and 6903 * restore segments around it, nullifying its effect. 6904 */ 6905 loadsegment(ds, __USER_DS); 6906 loadsegment(es, __USER_DS); 6907 #endif 6908 6909 vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET; 6910 6911 pt_guest_exit(vmx); 6912 6913 kvm_load_host_xsave_state(vcpu); 6914 6915 if (is_guest_mode(vcpu)) { 6916 /* 6917 * Track VMLAUNCH/VMRESUME that have made past guest state 6918 * checking. 6919 */ 6920 if (vmx->nested.nested_run_pending && 6921 !vmx->exit_reason.failed_vmentry) 6922 ++vcpu->stat.nested_run; 6923 6924 vmx->nested.nested_run_pending = 0; 6925 } 6926 6927 vmx->idt_vectoring_info = 0; 6928 6929 if (unlikely(vmx->fail)) { 6930 vmx->exit_reason.full = 0xdead; 6931 return EXIT_FASTPATH_NONE; 6932 } 6933 6934 vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON); 6935 if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY)) 6936 kvm_machine_check(); 6937 6938 if (likely(!vmx->exit_reason.failed_vmentry)) 6939 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 6940 6941 trace_kvm_exit(vcpu, KVM_ISA_VMX); 6942 6943 if (unlikely(vmx->exit_reason.failed_vmentry)) 6944 return EXIT_FASTPATH_NONE; 6945 6946 vmx->loaded_vmcs->launched = 1; 6947 6948 vmx_recover_nmi_blocking(vmx); 6949 vmx_complete_interrupts(vmx); 6950 6951 if (is_guest_mode(vcpu)) 6952 return EXIT_FASTPATH_NONE; 6953 6954 return vmx_exit_handlers_fastpath(vcpu); 6955 } 6956 6957 static void vmx_free_vcpu(struct kvm_vcpu *vcpu) 6958 { 6959 struct vcpu_vmx *vmx = to_vmx(vcpu); 6960 6961 if (enable_pml) 6962 vmx_destroy_pml_buffer(vmx); 6963 free_vpid(vmx->vpid); 6964 nested_vmx_free_vcpu(vcpu); 6965 free_loaded_vmcs(vmx->loaded_vmcs); 6966 } 6967 6968 static int vmx_create_vcpu(struct kvm_vcpu *vcpu) 6969 { 6970 struct vmx_uret_msr *tsx_ctrl; 6971 struct vcpu_vmx *vmx; 6972 int i, err; 6973 6974 BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0); 6975 vmx = to_vmx(vcpu); 6976 6977 INIT_LIST_HEAD(&vmx->pi_wakeup_list); 6978 6979 err = -ENOMEM; 6980 6981 vmx->vpid = allocate_vpid(); 6982 6983 /* 6984 * If PML is turned on, failure on enabling PML just results in failure 6985 * of creating the vcpu, therefore we can simplify PML logic (by 6986 * avoiding dealing with cases, such as enabling PML partially on vcpus 6987 * for the guest), etc. 6988 */ 6989 if (enable_pml) { 6990 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 6991 if (!vmx->pml_pg) 6992 goto free_vpid; 6993 } 6994 6995 for (i = 0; i < kvm_nr_uret_msrs; ++i) 6996 vmx->guest_uret_msrs[i].mask = -1ull; 6997 if (boot_cpu_has(X86_FEATURE_RTM)) { 6998 /* 6999 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception. 
7000 * Keep the host value unchanged to avoid changing CPUID bits 7001 * under the host kernel's feet. 7002 */ 7003 tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL); 7004 if (tsx_ctrl) 7005 tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR; 7006 } 7007 7008 err = alloc_loaded_vmcs(&vmx->vmcs01); 7009 if (err < 0) 7010 goto free_pml; 7011 7012 /* 7013 * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a 7014 * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the 7015 * feature only for vmcs01, KVM currently isn't equipped to realize any 7016 * performance benefits from enabling it for vmcs02. 7017 */ 7018 if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) && 7019 (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { 7020 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs; 7021 7022 evmcs->hv_enlightenments_control.msr_bitmap = 1; 7023 } 7024 7025 /* The MSR bitmap starts with all ones */ 7026 bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS); 7027 bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS); 7028 7029 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R); 7030 #ifdef CONFIG_X86_64 7031 vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW); 7032 vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW); 7033 vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); 7034 #endif 7035 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); 7036 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); 7037 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); 7038 if (kvm_cstate_in_guest(vcpu->kvm)) { 7039 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R); 7040 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R); 7041 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R); 7042 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R); 7043 } 7044 7045 vmx->loaded_vmcs = &vmx->vmcs01; 7046 7047 if (cpu_need_virtualize_apic_accesses(vcpu)) { 7048 err = alloc_apic_access_page(vcpu->kvm); 7049 if (err) 7050 goto free_vmcs; 7051 } 7052 7053 if (enable_ept && !enable_unrestricted_guest) { 7054 err = init_rmode_identity_map(vcpu->kvm); 7055 if (err) 7056 goto free_vmcs; 7057 } 7058 7059 return 0; 7060 7061 free_vmcs: 7062 free_loaded_vmcs(vmx->loaded_vmcs); 7063 free_pml: 7064 vmx_destroy_pml_buffer(vmx); 7065 free_vpid: 7066 free_vpid(vmx->vpid); 7067 return err; 7068 } 7069 7070 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" 7071 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" 7072 7073 static int vmx_vm_init(struct kvm *kvm) 7074 { 7075 if (!ple_gap) 7076 kvm->arch.pause_in_guest = true; 7077 7078 if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { 7079 switch (l1tf_mitigation) { 7080 case L1TF_MITIGATION_OFF: 7081 case L1TF_MITIGATION_FLUSH_NOWARN: 7082 /* 'I explicitly don't care' is set */ 7083 break; 7084 case L1TF_MITIGATION_FLUSH: 7085 case L1TF_MITIGATION_FLUSH_NOSMT: 7086 case L1TF_MITIGATION_FULL: 7087 /* 7088 * Warn upon starting the first VM in a potentially 7089 * insecure environment. 
7090 */ 7091 if (sched_smt_active()) 7092 pr_warn_once(L1TF_MSG_SMT); 7093 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) 7094 pr_warn_once(L1TF_MSG_L1D); 7095 break; 7096 case L1TF_MITIGATION_FULL_FORCE: 7097 /* Flush is enforced */ 7098 break; 7099 } 7100 } 7101 return 0; 7102 } 7103 7104 static int __init vmx_check_processor_compat(void) 7105 { 7106 struct vmcs_config vmcs_conf; 7107 struct vmx_capability vmx_cap; 7108 7109 if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 7110 !this_cpu_has(X86_FEATURE_VMX)) { 7111 pr_err("kvm: VMX is disabled on CPU %d\n", smp_processor_id()); 7112 return -EIO; 7113 } 7114 7115 if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) 7116 return -EIO; 7117 if (nested) 7118 nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept); 7119 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { 7120 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", 7121 smp_processor_id()); 7122 return -EIO; 7123 } 7124 return 0; 7125 } 7126 7127 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) 7128 { 7129 u8 cache; 7130 7131 /* We wanted to honor guest CD/MTRR/PAT, but doing so could result in 7132 * memory aliases with conflicting memory types and sometimes MCEs. 7133 * We have to be careful as to what are honored and when. 7134 * 7135 * For MMIO, guest CD/MTRR are ignored. The EPT memory type is set to 7136 * UC. The effective memory type is UC or WC depending on guest PAT. 7137 * This was historically the source of MCEs and we want to be 7138 * conservative. 7139 * 7140 * When there is no need to deal with noncoherent DMA (e.g., no VT-d 7141 * or VT-d has snoop control), guest CD/MTRR/PAT are all ignored. The 7142 * EPT memory type is set to WB. The effective memory type is forced 7143 * WB. 7144 * 7145 * Otherwise, we trust guest. Guest CD/MTRR/PAT are all honored. The 7146 * EPT memory type is used to emulate guest CD/MTRR. 7147 */ 7148 7149 if (is_mmio) 7150 return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT; 7151 7152 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) 7153 return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT; 7154 7155 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { 7156 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 7157 cache = MTRR_TYPE_WRBACK; 7158 else 7159 cache = MTRR_TYPE_UNCACHABLE; 7160 7161 return (cache << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT; 7162 } 7163 7164 return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT; 7165 } 7166 7167 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl) 7168 { 7169 /* 7170 * These bits in the secondary execution controls field 7171 * are dynamic, the others are mostly based on the hypervisor 7172 * architecture and the guest's CPUID. Do not touch the 7173 * dynamic bits. 7174 */ 7175 u32 mask = 7176 SECONDARY_EXEC_SHADOW_VMCS | 7177 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 7178 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 7179 SECONDARY_EXEC_DESC; 7180 7181 u32 cur_ctl = secondary_exec_controls_get(vmx); 7182 7183 secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask)); 7184 } 7185 7186 /* 7187 * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits 7188 * (indicating "allowed-1") if they are supported in the guest's CPUID. 
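 * For example, X86_CR4_PCIDE becomes allowed-1 only if the guest's
 * CPUID.1:ECX advertises PCID; a bit left clear in CR4_FIXED1 cannot be
 * set by L1 while it is in VMX operation.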
7189 */ 7190 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) 7191 { 7192 struct vcpu_vmx *vmx = to_vmx(vcpu); 7193 struct kvm_cpuid_entry2 *entry; 7194 7195 vmx->nested.msrs.cr0_fixed1 = 0xffffffff; 7196 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; 7197 7198 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \ 7199 if (entry && (entry->_reg & (_cpuid_mask))) \ 7200 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ 7201 } while (0) 7202 7203 entry = kvm_find_cpuid_entry(vcpu, 0x1, 0); 7204 cr4_fixed1_update(X86_CR4_VME, edx, feature_bit(VME)); 7205 cr4_fixed1_update(X86_CR4_PVI, edx, feature_bit(VME)); 7206 cr4_fixed1_update(X86_CR4_TSD, edx, feature_bit(TSC)); 7207 cr4_fixed1_update(X86_CR4_DE, edx, feature_bit(DE)); 7208 cr4_fixed1_update(X86_CR4_PSE, edx, feature_bit(PSE)); 7209 cr4_fixed1_update(X86_CR4_PAE, edx, feature_bit(PAE)); 7210 cr4_fixed1_update(X86_CR4_MCE, edx, feature_bit(MCE)); 7211 cr4_fixed1_update(X86_CR4_PGE, edx, feature_bit(PGE)); 7212 cr4_fixed1_update(X86_CR4_OSFXSR, edx, feature_bit(FXSR)); 7213 cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM)); 7214 cr4_fixed1_update(X86_CR4_VMXE, ecx, feature_bit(VMX)); 7215 cr4_fixed1_update(X86_CR4_SMXE, ecx, feature_bit(SMX)); 7216 cr4_fixed1_update(X86_CR4_PCIDE, ecx, feature_bit(PCID)); 7217 cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, feature_bit(XSAVE)); 7218 7219 entry = kvm_find_cpuid_entry(vcpu, 0x7, 0); 7220 cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, feature_bit(FSGSBASE)); 7221 cr4_fixed1_update(X86_CR4_SMEP, ebx, feature_bit(SMEP)); 7222 cr4_fixed1_update(X86_CR4_SMAP, ebx, feature_bit(SMAP)); 7223 cr4_fixed1_update(X86_CR4_PKE, ecx, feature_bit(PKU)); 7224 cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP)); 7225 cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57)); 7226 7227 #undef cr4_fixed1_update 7228 } 7229 7230 static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu) 7231 { 7232 struct vcpu_vmx *vmx = to_vmx(vcpu); 7233 7234 if (kvm_mpx_supported()) { 7235 bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX); 7236 7237 if (mpx_enabled) { 7238 vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; 7239 vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; 7240 } else { 7241 vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS; 7242 vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS; 7243 } 7244 } 7245 } 7246 7247 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) 7248 { 7249 struct vcpu_vmx *vmx = to_vmx(vcpu); 7250 struct kvm_cpuid_entry2 *best = NULL; 7251 int i; 7252 7253 for (i = 0; i < PT_CPUID_LEAVES; i++) { 7254 best = kvm_find_cpuid_entry(vcpu, 0x14, i); 7255 if (!best) 7256 return; 7257 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax; 7258 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx; 7259 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx; 7260 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx; 7261 } 7262 7263 /* Get the number of configurable Address Ranges for filtering */ 7264 vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps, 7265 PT_CAP_num_address_ranges); 7266 7267 /* Initialize and clear the no dependency bits */ 7268 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS | 7269 RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC | 7270 RTIT_CTL_BRANCH_EN); 7271 7272 /* 7273 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise 7274 * will inject an #GP 7275 */ 7276 if (intel_pt_validate_cap(vmx->pt_desc.caps, 
PT_CAP_cr3_filtering)) 7277 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN; 7278 7279 /* 7280 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and 7281 * PSBFreq can be set 7282 */ 7283 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc)) 7284 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC | 7285 RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ); 7286 7287 /* 7288 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn and MTCFreq can be set 7289 */ 7290 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc)) 7291 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN | 7292 RTIT_CTL_MTC_RANGE); 7293 7294 /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */ 7295 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite)) 7296 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW | 7297 RTIT_CTL_PTW_EN); 7298 7299 /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */ 7300 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace)) 7301 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN; 7302 7303 /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */ 7304 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output)) 7305 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA; 7306 7307 /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */ 7308 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys)) 7309 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; 7310 7311 /* unmask address range configure area */ 7312 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) 7313 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); 7314 } 7315 7316 static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) 7317 { 7318 struct vcpu_vmx *vmx = to_vmx(vcpu); 7319 7320 /* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */ 7321 vcpu->arch.xsaves_enabled = false; 7322 7323 vmx_setup_uret_msrs(vmx); 7324 7325 if (cpu_has_secondary_exec_ctrls()) 7326 vmcs_set_secondary_exec_control(vmx, 7327 vmx_secondary_exec_control(vmx)); 7328 7329 if (nested_vmx_allowed(vcpu)) 7330 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= 7331 FEAT_CTL_VMX_ENABLED_INSIDE_SMX | 7332 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; 7333 else 7334 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= 7335 ~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX | 7336 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX); 7337 7338 if (nested_vmx_allowed(vcpu)) { 7339 nested_vmx_cr_fixed1_bits_update(vcpu); 7340 nested_vmx_entry_exit_ctls_update(vcpu); 7341 } 7342 7343 if (boot_cpu_has(X86_FEATURE_INTEL_PT) && 7344 guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT)) 7345 update_intel_pt_cfg(vcpu); 7346 7347 if (boot_cpu_has(X86_FEATURE_RTM)) { 7348 struct vmx_uret_msr *msr; 7349 msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL); 7350 if (msr) { 7351 bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM); 7352 vmx_set_guest_uret_msr(vmx, msr, enabled ? 
0 : TSX_CTRL_RTM_DISABLE); 7353 } 7354 } 7355 7356 if (kvm_cpu_cap_has(X86_FEATURE_XFD)) 7357 vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R, 7358 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)); 7359 7360 7361 set_cr4_guest_host_mask(vmx); 7362 7363 vmx_write_encls_bitmap(vcpu, NULL); 7364 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX)) 7365 vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED; 7366 else 7367 vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED; 7368 7369 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC)) 7370 vmx->msr_ia32_feature_control_valid_bits |= 7371 FEAT_CTL_SGX_LC_ENABLED; 7372 else 7373 vmx->msr_ia32_feature_control_valid_bits &= 7374 ~FEAT_CTL_SGX_LC_ENABLED; 7375 7376 /* Refresh #PF interception to account for MAXPHYADDR changes. */ 7377 vmx_update_exception_bitmap(vcpu); 7378 } 7379 7380 static __init void vmx_set_cpu_caps(void) 7381 { 7382 kvm_set_cpu_caps(); 7383 7384 /* CPUID 0x1 */ 7385 if (nested) 7386 kvm_cpu_cap_set(X86_FEATURE_VMX); 7387 7388 /* CPUID 0x7 */ 7389 if (kvm_mpx_supported()) 7390 kvm_cpu_cap_check_and_set(X86_FEATURE_MPX); 7391 if (!cpu_has_vmx_invpcid()) 7392 kvm_cpu_cap_clear(X86_FEATURE_INVPCID); 7393 if (vmx_pt_mode_is_host_guest()) 7394 kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT); 7395 7396 if (!enable_sgx) { 7397 kvm_cpu_cap_clear(X86_FEATURE_SGX); 7398 kvm_cpu_cap_clear(X86_FEATURE_SGX_LC); 7399 kvm_cpu_cap_clear(X86_FEATURE_SGX1); 7400 kvm_cpu_cap_clear(X86_FEATURE_SGX2); 7401 } 7402 7403 if (vmx_umip_emulated()) 7404 kvm_cpu_cap_set(X86_FEATURE_UMIP); 7405 7406 /* CPUID 0xD.1 */ 7407 supported_xss = 0; 7408 if (!cpu_has_vmx_xsaves()) 7409 kvm_cpu_cap_clear(X86_FEATURE_XSAVES); 7410 7411 /* CPUID 0x80000001 and 0x7 (RDPID) */ 7412 if (!cpu_has_vmx_rdtscp()) { 7413 kvm_cpu_cap_clear(X86_FEATURE_RDTSCP); 7414 kvm_cpu_cap_clear(X86_FEATURE_RDPID); 7415 } 7416 7417 if (cpu_has_vmx_waitpkg()) 7418 kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); 7419 } 7420 7421 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) 7422 { 7423 to_vmx(vcpu)->req_immediate_exit = true; 7424 } 7425 7426 static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, 7427 struct x86_instruction_info *info) 7428 { 7429 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 7430 unsigned short port; 7431 bool intercept; 7432 int size; 7433 7434 if (info->intercept == x86_intercept_in || 7435 info->intercept == x86_intercept_ins) { 7436 port = info->src_val; 7437 size = info->dst_bytes; 7438 } else { 7439 port = info->dst_val; 7440 size = info->src_bytes; 7441 } 7442 7443 /* 7444 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction 7445 * VM-exits depend on the 'unconditional IO exiting' VM-execution 7446 * control. 7447 * 7448 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps. 7449 */ 7450 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 7451 intercept = nested_cpu_has(vmcs12, 7452 CPU_BASED_UNCOND_IO_EXITING); 7453 else 7454 intercept = nested_vmx_check_io_bitmaps(vcpu, port, size); 7455 7456 /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ 7457 return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; 7458 } 7459 7460 static int vmx_check_intercept(struct kvm_vcpu *vcpu, 7461 struct x86_instruction_info *info, 7462 enum x86_intercept_stage stage, 7463 struct x86_exception *exception) 7464 { 7465 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 7466 7467 switch (info->intercept) { 7468 /* 7469 * RDPID causes #UD if disabled through secondary execution controls. 
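	 * For example, if L1 clears ENABLE_RDTSCP in vmcs12 and L2 executes
	 * RDPID, the instruction must raise #UD in L2 instead of being
	 * emulated on L2's behalf; the check below injects that #UD.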
	 * Because it is marked as EmulateOnUD, we need to intercept it here.
	 * Note, RDPID is hidden behind ENABLE_RDTSCP.
	 */
	case x86_intercept_rdpid:
		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
			exception->vector = UD_VECTOR;
			exception->error_code_valid = false;
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;

	case x86_intercept_in:
	case x86_intercept_ins:
	case x86_intercept_out:
	case x86_intercept_outs:
		return vmx_check_intercept_io(vcpu, info);

	case x86_intercept_lgdt:
	case x86_intercept_lidt:
	case x86_intercept_lldt:
	case x86_intercept_ltr:
	case x86_intercept_sgdt:
	case x86_intercept_sidt:
	case x86_intercept_sldt:
	case x86_intercept_str:
		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
			return X86EMUL_CONTINUE;

		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
		break;

	/* TODO: check more intercepts... */
	default:
		break;
	}

	return X86EMUL_UNHANDLEABLE;
}

#ifdef CONFIG_X86_64
/* Compute (a << shift) / divisor; returns 1 on overflow, otherwise 0. */
static inline int u64_shl_div_u64(u64 a, unsigned int shift,
				  u64 divisor, u64 *result)
{
	u64 low = a << shift, high = a >> (64 - shift);

	/* divq faults if the quotient doesn't fit in 64 bits, i.e. on overflow */
	if (high >= divisor)
		return 1;

	/* low holds the result, high holds the remainder, which is discarded */
	asm("divq %2\n\t" : "=a" (low), "=d" (high) :
	    "rm" (divisor), "0" (low), "1" (high));
	*result = low;

	return 0;
}

static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired)
{
	struct vcpu_vmx *vmx;
	u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
	struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;

	vmx = to_vmx(vcpu);
	tscl = rdtsc();
	guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
	delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
	lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
						    ktimer->timer_advance_ns);

	if (delta_tsc > lapic_timer_advance_cycles)
		delta_tsc -= lapic_timer_advance_cycles;
	else
		delta_tsc = 0;

	/* Convert to host delta tsc if tsc scaling is enabled */
	if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
	    delta_tsc && u64_shl_div_u64(delta_tsc,
				kvm_tsc_scaling_ratio_frac_bits,
				vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
		return -ERANGE;

	/*
	 * If the delta tsc doesn't fit in 32 bits after shifting by the
	 * preemption timer rate, we can't use the preemption timer.
	 * It's possible that it fits on later vmentries, but checking
	 * on every vmentry is costly so we just use an hrtimer.
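	 *
	 * Illustration (example numbers): the VMX preemption timer is a
	 * 32-bit down-counter that ticks once every
	 * 2^cpu_preemption_timer_multi TSC cycles, so the largest
	 * programmable deadline is 2^(cpu_preemption_timer_multi + 32) TSC
	 * cycles.  With a rate field of 5 and a 3 GHz TSC that is 2^37
	 * cycles, roughly 45 seconds; any deadline further out is handled
	 * by the hrtimer instead.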
7559 */ 7560 if (delta_tsc >> (cpu_preemption_timer_multi + 32)) 7561 return -ERANGE; 7562 7563 vmx->hv_deadline_tsc = tscl + delta_tsc; 7564 *expired = !delta_tsc; 7565 return 0; 7566 } 7567 7568 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) 7569 { 7570 to_vmx(vcpu)->hv_deadline_tsc = -1; 7571 } 7572 #endif 7573 7574 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) 7575 { 7576 if (!kvm_pause_in_guest(vcpu->kvm)) 7577 shrink_ple_window(vcpu); 7578 } 7579 7580 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu) 7581 { 7582 struct vcpu_vmx *vmx = to_vmx(vcpu); 7583 7584 if (is_guest_mode(vcpu)) { 7585 vmx->nested.update_vmcs01_cpu_dirty_logging = true; 7586 return; 7587 } 7588 7589 /* 7590 * Note, cpu_dirty_logging_count can be changed concurrent with this 7591 * code, but in that case another update request will be made and so 7592 * the guest will never run with a stale PML value. 7593 */ 7594 if (vcpu->kvm->arch.cpu_dirty_logging_count) 7595 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML); 7596 else 7597 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML); 7598 } 7599 7600 static void vmx_setup_mce(struct kvm_vcpu *vcpu) 7601 { 7602 if (vcpu->arch.mcg_cap & MCG_LMCE_P) 7603 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= 7604 FEAT_CTL_LMCE_ENABLED; 7605 else 7606 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= 7607 ~FEAT_CTL_LMCE_ENABLED; 7608 } 7609 7610 static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) 7611 { 7612 /* we need a nested vmexit to enter SMM, postpone if run is pending */ 7613 if (to_vmx(vcpu)->nested.nested_run_pending) 7614 return -EBUSY; 7615 return !is_smm(vcpu); 7616 } 7617 7618 static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate) 7619 { 7620 struct vcpu_vmx *vmx = to_vmx(vcpu); 7621 7622 vmx->nested.smm.guest_mode = is_guest_mode(vcpu); 7623 if (vmx->nested.smm.guest_mode) 7624 nested_vmx_vmexit(vcpu, -1, 0, 0); 7625 7626 vmx->nested.smm.vmxon = vmx->nested.vmxon; 7627 vmx->nested.vmxon = false; 7628 vmx_clear_hlt(vcpu); 7629 return 0; 7630 } 7631 7632 static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) 7633 { 7634 struct vcpu_vmx *vmx = to_vmx(vcpu); 7635 int ret; 7636 7637 if (vmx->nested.smm.vmxon) { 7638 vmx->nested.vmxon = true; 7639 vmx->nested.smm.vmxon = false; 7640 } 7641 7642 if (vmx->nested.smm.guest_mode) { 7643 ret = nested_vmx_enter_non_root_mode(vcpu, false); 7644 if (ret) 7645 return ret; 7646 7647 vmx->nested.smm.guest_mode = false; 7648 } 7649 return 0; 7650 } 7651 7652 static void vmx_enable_smi_window(struct kvm_vcpu *vcpu) 7653 { 7654 /* RSM will cause a vmexit anyway. 
*/ 7655 } 7656 7657 static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu) 7658 { 7659 return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu); 7660 } 7661 7662 static void vmx_migrate_timers(struct kvm_vcpu *vcpu) 7663 { 7664 if (is_guest_mode(vcpu)) { 7665 struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer; 7666 7667 if (hrtimer_try_to_cancel(timer) == 1) 7668 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); 7669 } 7670 } 7671 7672 static void hardware_unsetup(void) 7673 { 7674 kvm_set_posted_intr_wakeup_handler(NULL); 7675 7676 if (nested) 7677 nested_vmx_hardware_unsetup(); 7678 7679 free_kvm_area(); 7680 } 7681 7682 static bool vmx_check_apicv_inhibit_reasons(ulong bit) 7683 { 7684 ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) | 7685 BIT(APICV_INHIBIT_REASON_ABSENT) | 7686 BIT(APICV_INHIBIT_REASON_HYPERV) | 7687 BIT(APICV_INHIBIT_REASON_BLOCKIRQ); 7688 7689 return supported & BIT(bit); 7690 } 7691 7692 static struct kvm_x86_ops vmx_x86_ops __initdata = { 7693 .name = "kvm_intel", 7694 7695 .hardware_unsetup = hardware_unsetup, 7696 7697 .hardware_enable = hardware_enable, 7698 .hardware_disable = hardware_disable, 7699 .cpu_has_accelerated_tpr = report_flexpriority, 7700 .has_emulated_msr = vmx_has_emulated_msr, 7701 7702 .vm_size = sizeof(struct kvm_vmx), 7703 .vm_init = vmx_vm_init, 7704 7705 .vcpu_create = vmx_create_vcpu, 7706 .vcpu_free = vmx_free_vcpu, 7707 .vcpu_reset = vmx_vcpu_reset, 7708 7709 .prepare_guest_switch = vmx_prepare_switch_to_guest, 7710 .vcpu_load = vmx_vcpu_load, 7711 .vcpu_put = vmx_vcpu_put, 7712 7713 .update_exception_bitmap = vmx_update_exception_bitmap, 7714 .get_msr_feature = vmx_get_msr_feature, 7715 .get_msr = vmx_get_msr, 7716 .set_msr = vmx_set_msr, 7717 .get_segment_base = vmx_get_segment_base, 7718 .get_segment = vmx_get_segment, 7719 .set_segment = vmx_set_segment, 7720 .get_cpl = vmx_get_cpl, 7721 .get_cs_db_l_bits = vmx_get_cs_db_l_bits, 7722 .set_cr0 = vmx_set_cr0, 7723 .is_valid_cr4 = vmx_is_valid_cr4, 7724 .set_cr4 = vmx_set_cr4, 7725 .set_efer = vmx_set_efer, 7726 .get_idt = vmx_get_idt, 7727 .set_idt = vmx_set_idt, 7728 .get_gdt = vmx_get_gdt, 7729 .set_gdt = vmx_set_gdt, 7730 .set_dr7 = vmx_set_dr7, 7731 .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, 7732 .cache_reg = vmx_cache_reg, 7733 .get_rflags = vmx_get_rflags, 7734 .set_rflags = vmx_set_rflags, 7735 .get_if_flag = vmx_get_if_flag, 7736 7737 .tlb_flush_all = vmx_flush_tlb_all, 7738 .tlb_flush_current = vmx_flush_tlb_current, 7739 .tlb_flush_gva = vmx_flush_tlb_gva, 7740 .tlb_flush_guest = vmx_flush_tlb_guest, 7741 7742 .vcpu_pre_run = vmx_vcpu_pre_run, 7743 .run = vmx_vcpu_run, 7744 .handle_exit = vmx_handle_exit, 7745 .skip_emulated_instruction = vmx_skip_emulated_instruction, 7746 .update_emulated_instruction = vmx_update_emulated_instruction, 7747 .set_interrupt_shadow = vmx_set_interrupt_shadow, 7748 .get_interrupt_shadow = vmx_get_interrupt_shadow, 7749 .patch_hypercall = vmx_patch_hypercall, 7750 .set_irq = vmx_inject_irq, 7751 .set_nmi = vmx_inject_nmi, 7752 .queue_exception = vmx_queue_exception, 7753 .cancel_injection = vmx_cancel_injection, 7754 .interrupt_allowed = vmx_interrupt_allowed, 7755 .nmi_allowed = vmx_nmi_allowed, 7756 .get_nmi_mask = vmx_get_nmi_mask, 7757 .set_nmi_mask = vmx_set_nmi_mask, 7758 .enable_nmi_window = vmx_enable_nmi_window, 7759 .enable_irq_window = vmx_enable_irq_window, 7760 .update_cr8_intercept = vmx_update_cr8_intercept, 7761 .set_virtual_apic_mode = vmx_set_virtual_apic_mode, 7762 
.set_apic_access_page_addr = vmx_set_apic_access_page_addr, 7763 .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, 7764 .load_eoi_exitmap = vmx_load_eoi_exitmap, 7765 .apicv_post_state_restore = vmx_apicv_post_state_restore, 7766 .check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons, 7767 .hwapic_irr_update = vmx_hwapic_irr_update, 7768 .hwapic_isr_update = vmx_hwapic_isr_update, 7769 .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, 7770 .sync_pir_to_irr = vmx_sync_pir_to_irr, 7771 .deliver_posted_interrupt = vmx_deliver_posted_interrupt, 7772 .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt, 7773 7774 .set_tss_addr = vmx_set_tss_addr, 7775 .set_identity_map_addr = vmx_set_identity_map_addr, 7776 .get_mt_mask = vmx_get_mt_mask, 7777 7778 .get_exit_info = vmx_get_exit_info, 7779 7780 .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid, 7781 7782 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, 7783 7784 .get_l2_tsc_offset = vmx_get_l2_tsc_offset, 7785 .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier, 7786 .write_tsc_offset = vmx_write_tsc_offset, 7787 .write_tsc_multiplier = vmx_write_tsc_multiplier, 7788 7789 .load_mmu_pgd = vmx_load_mmu_pgd, 7790 7791 .check_intercept = vmx_check_intercept, 7792 .handle_exit_irqoff = vmx_handle_exit_irqoff, 7793 7794 .request_immediate_exit = vmx_request_immediate_exit, 7795 7796 .sched_in = vmx_sched_in, 7797 7798 .cpu_dirty_log_size = PML_ENTITY_NUM, 7799 .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging, 7800 7801 .pmu_ops = &intel_pmu_ops, 7802 .nested_ops = &vmx_nested_ops, 7803 7804 .update_pi_irte = pi_update_irte, 7805 .start_assignment = vmx_pi_start_assignment, 7806 7807 #ifdef CONFIG_X86_64 7808 .set_hv_timer = vmx_set_hv_timer, 7809 .cancel_hv_timer = vmx_cancel_hv_timer, 7810 #endif 7811 7812 .setup_mce = vmx_setup_mce, 7813 7814 .smi_allowed = vmx_smi_allowed, 7815 .enter_smm = vmx_enter_smm, 7816 .leave_smm = vmx_leave_smm, 7817 .enable_smi_window = vmx_enable_smi_window, 7818 7819 .can_emulate_instruction = vmx_can_emulate_instruction, 7820 .apic_init_signal_blocked = vmx_apic_init_signal_blocked, 7821 .migrate_timers = vmx_migrate_timers, 7822 7823 .msr_filter_changed = vmx_msr_filter_changed, 7824 .complete_emulated_msr = kvm_complete_insn_gp, 7825 7826 .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector, 7827 }; 7828 7829 static unsigned int vmx_handle_intel_pt_intr(void) 7830 { 7831 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 7832 7833 /* '0' on failure so that the !PT case can use a RET0 static call. */ 7834 if (!kvm_arch_pmi_in_guest(vcpu)) 7835 return 0; 7836 7837 kvm_make_request(KVM_REQ_PMI, vcpu); 7838 __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT, 7839 (unsigned long *)&vcpu->arch.pmu.global_status); 7840 return 1; 7841 } 7842 7843 static __init void vmx_setup_user_return_msrs(void) 7844 { 7845 7846 /* 7847 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm 7848 * will emulate SYSCALL in legacy mode if the vendor string in guest 7849 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To 7850 * support this emulation, MSR_STAR is included in the list for i386, 7851 * but is never loaded into hardware. MSR_CSTAR is also never loaded 7852 * into hardware and is here purely for emulation purposes. 
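 *
 * For example, a 32-bit guest that sees an "AuthenticAMD" vendor string
 * can execute SYSCALL; KVM emulates the instruction using the MSR_STAR
 * value tracked here even though that MSR is never written to hardware.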
7853 */ 7854 const u32 vmx_uret_msrs_list[] = { 7855 #ifdef CONFIG_X86_64 7856 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, 7857 #endif 7858 MSR_EFER, MSR_TSC_AUX, MSR_STAR, 7859 MSR_IA32_TSX_CTRL, 7860 }; 7861 int i; 7862 7863 BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS); 7864 7865 for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) 7866 kvm_add_user_return_msr(vmx_uret_msrs_list[i]); 7867 } 7868 7869 static struct kvm_x86_init_ops vmx_init_ops __initdata; 7870 7871 static __init int hardware_setup(void) 7872 { 7873 unsigned long host_bndcfgs; 7874 struct desc_ptr dt; 7875 int r; 7876 7877 store_idt(&dt); 7878 host_idt_base = dt.address; 7879 7880 vmx_setup_user_return_msrs(); 7881 7882 if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0) 7883 return -EIO; 7884 7885 if (boot_cpu_has(X86_FEATURE_NX)) 7886 kvm_enable_efer_bits(EFER_NX); 7887 7888 if (boot_cpu_has(X86_FEATURE_MPX)) { 7889 rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs); 7890 WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost"); 7891 } 7892 7893 if (!cpu_has_vmx_mpx()) 7894 supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | 7895 XFEATURE_MASK_BNDCSR); 7896 7897 if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || 7898 !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) 7899 enable_vpid = 0; 7900 7901 if (!cpu_has_vmx_ept() || 7902 !cpu_has_vmx_ept_4levels() || 7903 !cpu_has_vmx_ept_mt_wb() || 7904 !cpu_has_vmx_invept_global()) 7905 enable_ept = 0; 7906 7907 /* NX support is required for shadow paging. */ 7908 if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) { 7909 pr_err_ratelimited("kvm: NX (Execute Disable) not supported\n"); 7910 return -EOPNOTSUPP; 7911 } 7912 7913 if (!cpu_has_vmx_ept_ad_bits() || !enable_ept) 7914 enable_ept_ad_bits = 0; 7915 7916 if (!cpu_has_vmx_unrestricted_guest() || !enable_ept) 7917 enable_unrestricted_guest = 0; 7918 7919 if (!cpu_has_vmx_flexpriority()) 7920 flexpriority_enabled = 0; 7921 7922 if (!cpu_has_virtual_nmis()) 7923 enable_vnmi = 0; 7924 7925 /* 7926 * set_apic_access_page_addr() is used to reload apic access 7927 * page upon invalidation. No need to do anything if not 7928 * using the APIC_ACCESS_ADDR VMCS field. 
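	 * That field is only programmed when flexpriority (TPR shadow plus
	 * virtualize-APIC-accesses) is in use, which is why the callback is
	 * cleared below when flexpriority_enabled is false.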
7929 */ 7930 if (!flexpriority_enabled) 7931 vmx_x86_ops.set_apic_access_page_addr = NULL; 7932 7933 if (!cpu_has_vmx_tpr_shadow()) 7934 vmx_x86_ops.update_cr8_intercept = NULL; 7935 7936 #if IS_ENABLED(CONFIG_HYPERV) 7937 if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH 7938 && enable_ept) { 7939 vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb; 7940 vmx_x86_ops.tlb_remote_flush_with_range = 7941 hv_remote_flush_tlb_with_range; 7942 } 7943 #endif 7944 7945 if (!cpu_has_vmx_ple()) { 7946 ple_gap = 0; 7947 ple_window = 0; 7948 ple_window_grow = 0; 7949 ple_window_max = 0; 7950 ple_window_shrink = 0; 7951 } 7952 7953 if (!cpu_has_vmx_apicv()) 7954 enable_apicv = 0; 7955 if (!enable_apicv) 7956 vmx_x86_ops.sync_pir_to_irr = NULL; 7957 7958 if (cpu_has_vmx_tsc_scaling()) { 7959 kvm_has_tsc_control = true; 7960 kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; 7961 kvm_tsc_scaling_ratio_frac_bits = 48; 7962 } 7963 7964 kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection(); 7965 7966 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ 7967 7968 if (enable_ept) 7969 kvm_mmu_set_ept_masks(enable_ept_ad_bits, 7970 cpu_has_vmx_ept_execute_only()); 7971 7972 kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(), 7973 ept_caps_to_lpage_level(vmx_capability.ept)); 7974 7975 /* 7976 * Only enable PML when hardware supports PML feature, and both EPT 7977 * and EPT A/D bit features are enabled -- PML depends on them to work. 7978 */ 7979 if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) 7980 enable_pml = 0; 7981 7982 if (!enable_pml) 7983 vmx_x86_ops.cpu_dirty_log_size = 0; 7984 7985 if (!cpu_has_vmx_preemption_timer()) 7986 enable_preemption_timer = false; 7987 7988 if (enable_preemption_timer) { 7989 u64 use_timer_freq = 5000ULL * 1000 * 1000; 7990 u64 vmx_msr; 7991 7992 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); 7993 cpu_preemption_timer_multi = 7994 vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; 7995 7996 if (tsc_khz) 7997 use_timer_freq = (u64)tsc_khz * 1000; 7998 use_timer_freq >>= cpu_preemption_timer_multi; 7999 8000 /* 8001 * KVM "disables" the preemption timer by setting it to its max 8002 * value. Don't use the timer if it might cause spurious exits 8003 * at a rate faster than 0.1 Hz (of uninterrupted guest time). 
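		 *
		 * Worked example (illustrative numbers): with tsc_khz = 3000000
		 * (a 3 GHz TSC) and a rate field of 5, use_timer_freq is
		 * 3e9 >> 5, i.e. ~94 MHz.  A max-value timer then expires after
		 * ~46 seconds, well above the 10 second (0.1 Hz) floor, so the
		 * timer stays enabled; the check below only trips above
		 * ~429 MHz (0xffffffff / 10).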
8004 */ 8005 if (use_timer_freq > 0xffffffffu / 10) 8006 enable_preemption_timer = false; 8007 } 8008 8009 if (!enable_preemption_timer) { 8010 vmx_x86_ops.set_hv_timer = NULL; 8011 vmx_x86_ops.cancel_hv_timer = NULL; 8012 vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit; 8013 } 8014 8015 kvm_mce_cap_supported |= MCG_LMCE_P; 8016 8017 if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST) 8018 return -EINVAL; 8019 if (!enable_ept || !cpu_has_vmx_intel_pt()) 8020 pt_mode = PT_MODE_SYSTEM; 8021 if (pt_mode == PT_MODE_HOST_GUEST) 8022 vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr; 8023 else 8024 vmx_init_ops.handle_intel_pt_intr = NULL; 8025 8026 setup_default_sgx_lepubkeyhash(); 8027 8028 if (nested) { 8029 nested_vmx_setup_ctls_msrs(&vmcs_config.nested, 8030 vmx_capability.ept); 8031 8032 r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers); 8033 if (r) 8034 return r; 8035 } 8036 8037 vmx_set_cpu_caps(); 8038 8039 r = alloc_kvm_area(); 8040 if (r) 8041 nested_vmx_hardware_unsetup(); 8042 8043 kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler); 8044 8045 return r; 8046 } 8047 8048 static struct kvm_x86_init_ops vmx_init_ops __initdata = { 8049 .cpu_has_kvm_support = cpu_has_kvm_support, 8050 .disabled_by_bios = vmx_disabled_by_bios, 8051 .check_processor_compatibility = vmx_check_processor_compat, 8052 .hardware_setup = hardware_setup, 8053 .handle_intel_pt_intr = NULL, 8054 8055 .runtime_ops = &vmx_x86_ops, 8056 }; 8057 8058 static void vmx_cleanup_l1d_flush(void) 8059 { 8060 if (vmx_l1d_flush_pages) { 8061 free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); 8062 vmx_l1d_flush_pages = NULL; 8063 } 8064 /* Restore state so sysfs ignores VMX */ 8065 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 8066 } 8067 8068 static void vmx_exit(void) 8069 { 8070 #ifdef CONFIG_KEXEC_CORE 8071 RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); 8072 synchronize_rcu(); 8073 #endif 8074 8075 kvm_exit(); 8076 8077 #if IS_ENABLED(CONFIG_HYPERV) 8078 if (static_branch_unlikely(&enable_evmcs)) { 8079 int cpu; 8080 struct hv_vp_assist_page *vp_ap; 8081 /* 8082 * Reset everything to support using non-enlightened VMCS 8083 * access later (e.g. when we reload the module with 8084 * enlightened_vmcs=0) 8085 */ 8086 for_each_online_cpu(cpu) { 8087 vp_ap = hv_get_vp_assist_page(cpu); 8088 8089 if (!vp_ap) 8090 continue; 8091 8092 vp_ap->nested_control.features.directhypercall = 0; 8093 vp_ap->current_nested_vmcs = 0; 8094 vp_ap->enlighten_vmentry = 0; 8095 } 8096 8097 static_branch_disable(&enable_evmcs); 8098 } 8099 #endif 8100 vmx_cleanup_l1d_flush(); 8101 8102 allow_smaller_maxphyaddr = false; 8103 } 8104 module_exit(vmx_exit); 8105 8106 static int __init vmx_init(void) 8107 { 8108 int r, cpu; 8109 8110 #if IS_ENABLED(CONFIG_HYPERV) 8111 /* 8112 * Enlightened VMCS usage should be recommended and the host needs 8113 * to support eVMCS v1 or above. We can also disable eVMCS support 8114 * with module parameter. 
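	 *
	 * Concretely, the check below requires the enlightened_vmcs module
	 * parameter to still be true, Hyper-V to set
	 * HV_X64_ENLIGHTENED_VMCS_RECOMMENDED in its hints, and the
	 * advertised eVMCS version to be at least KVM_EVMCS_VERSION; a VP
	 * assist page must also exist on every online CPU, otherwise eVMCS
	 * is disabled again.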
	 */
	if (enlightened_vmcs &&
	    ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
	    (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
	    KVM_EVMCS_VERSION) {
		int cpu;

		/* Check that we have assist pages on all online CPUs */
		for_each_online_cpu(cpu) {
			if (!hv_get_vp_assist_page(cpu)) {
				enlightened_vmcs = false;
				break;
			}
		}

		if (enlightened_vmcs) {
			pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n");
			static_branch_enable(&enable_evmcs);
		}

		if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
			vmx_x86_ops.enable_direct_tlbflush
				= hv_enable_direct_tlbflush;

	} else {
		enlightened_vmcs = false;
	}
#endif

	r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
		     __alignof__(struct vcpu_vmx), THIS_MODULE);
	if (r)
		return r;

	/*
	 * Must be called after kvm_init() so that enable_ept is properly set
	 * up.  Hand in the mitigation parameter value that was stored by the
	 * pre-module-init parser.  If no parameter was given, it contains
	 * 'auto', which is turned into the default 'cond' mitigation mode.
	 */
	r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
	if (r) {
		vmx_exit();
		return r;
	}

	for_each_possible_cpu(cpu) {
		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));

		pi_init_cpu(cpu);
	}

#ifdef CONFIG_KEXEC_CORE
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   crash_vmclear_local_loaded_vmcss);
#endif
	vmx_check_vmcs12_offsets();

	/*
	 * Shadow paging doesn't have a (further) performance penalty from
	 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR, so enable it by default.
	 */
	if (!enable_ept)
		allow_smaller_maxphyaddr = true;

	return 0;
}
module_init(vmx_init);