1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * This module enables machines with Intel VT-x extensions to run virtual 6 * machines without emulation or binary translation. 7 * 8 * Copyright (C) 2006 Qumranet, Inc. 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 10 * 11 * Authors: 12 * Avi Kivity <avi@qumranet.com> 13 * Yaniv Kamay <yaniv@qumranet.com> 14 */ 15 16 #include <linux/highmem.h> 17 #include <linux/hrtimer.h> 18 #include <linux/kernel.h> 19 #include <linux/kvm_host.h> 20 #include <linux/module.h> 21 #include <linux/moduleparam.h> 22 #include <linux/mod_devicetable.h> 23 #include <linux/mm.h> 24 #include <linux/objtool.h> 25 #include <linux/sched.h> 26 #include <linux/sched/smt.h> 27 #include <linux/slab.h> 28 #include <linux/tboot.h> 29 #include <linux/trace_events.h> 30 #include <linux/entry-kvm.h> 31 32 #include <asm/apic.h> 33 #include <asm/asm.h> 34 #include <asm/cpu.h> 35 #include <asm/cpu_device_id.h> 36 #include <asm/debugreg.h> 37 #include <asm/desc.h> 38 #include <asm/fpu/api.h> 39 #include <asm/idtentry.h> 40 #include <asm/io.h> 41 #include <asm/irq_remapping.h> 42 #include <asm/kexec.h> 43 #include <asm/perf_event.h> 44 #include <asm/mmu_context.h> 45 #include <asm/mshyperv.h> 46 #include <asm/mwait.h> 47 #include <asm/spec-ctrl.h> 48 #include <asm/virtext.h> 49 #include <asm/vmx.h> 50 51 #include "capabilities.h" 52 #include "cpuid.h" 53 #include "evmcs.h" 54 #include "hyperv.h" 55 #include "kvm_onhyperv.h" 56 #include "irq.h" 57 #include "kvm_cache_regs.h" 58 #include "lapic.h" 59 #include "mmu.h" 60 #include "nested.h" 61 #include "pmu.h" 62 #include "sgx.h" 63 #include "trace.h" 64 #include "vmcs.h" 65 #include "vmcs12.h" 66 #include "vmx.h" 67 #include "x86.h" 68 69 MODULE_AUTHOR("Qumranet"); 70 MODULE_LICENSE("GPL"); 71 72 #ifdef MODULE 73 static const struct x86_cpu_id vmx_cpu_id[] = { 74 X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL), 75 {} 76 }; 77 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); 78 #endif 79 80 bool __read_mostly enable_vpid = 1; 81 module_param_named(vpid, enable_vpid, bool, 0444); 82 83 static bool __read_mostly enable_vnmi = 1; 84 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO); 85 86 bool __read_mostly flexpriority_enabled = 1; 87 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); 88 89 bool __read_mostly enable_ept = 1; 90 module_param_named(ept, enable_ept, bool, S_IRUGO); 91 92 bool __read_mostly enable_unrestricted_guest = 1; 93 module_param_named(unrestricted_guest, 94 enable_unrestricted_guest, bool, S_IRUGO); 95 96 bool __read_mostly enable_ept_ad_bits = 1; 97 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); 98 99 static bool __read_mostly emulate_invalid_guest_state = true; 100 module_param(emulate_invalid_guest_state, bool, S_IRUGO); 101 102 static bool __read_mostly fasteoi = 1; 103 module_param(fasteoi, bool, S_IRUGO); 104 105 module_param(enable_apicv, bool, S_IRUGO); 106 107 /* 108 * If nested=1, nested virtualization is supported, i.e., guests may use 109 * VMX and be a hypervisor for its own guests. If nested=0, guests may not 110 * use VMX instructions. 
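 *
 * Usage illustration (editorial addition, not upstream text): the parameter
 * is registered read-only (S_IRUGO / 0444 below), so nested support has to
 * be chosen when the module is loaded, e.g.
 *
 *	modprobe kvm_intel nested=1
 *
 * or persistently with an "options kvm_intel nested=1" line in modprobe.d.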
111 */ 112 static bool __read_mostly nested = 1; 113 module_param(nested, bool, S_IRUGO); 114 115 bool __read_mostly enable_pml = 1; 116 module_param_named(pml, enable_pml, bool, S_IRUGO); 117 118 static bool __read_mostly dump_invalid_vmcs = 0; 119 module_param(dump_invalid_vmcs, bool, 0644); 120 121 #define MSR_BITMAP_MODE_X2APIC 1 122 #define MSR_BITMAP_MODE_X2APIC_APICV 2 123 124 #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL 125 126 /* Guest_tsc -> host_tsc conversion requires 64-bit division. */ 127 static int __read_mostly cpu_preemption_timer_multi; 128 static bool __read_mostly enable_preemption_timer = 1; 129 #ifdef CONFIG_X86_64 130 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); 131 #endif 132 133 extern bool __read_mostly allow_smaller_maxphyaddr; 134 module_param(allow_smaller_maxphyaddr, bool, S_IRUGO); 135 136 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD) 137 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE 138 #define KVM_VM_CR0_ALWAYS_ON \ 139 (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) 140 141 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE 142 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) 143 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) 144 145 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) 146 147 #define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \ 148 RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \ 149 RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \ 150 RTIT_STATUS_BYTECNT)) 151 152 /* 153 * List of MSRs that can be directly passed to the guest. 154 * In addition to these x2apic and PT MSRs are handled specially. 155 */ 156 static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = { 157 MSR_IA32_SPEC_CTRL, 158 MSR_IA32_PRED_CMD, 159 MSR_IA32_TSC, 160 #ifdef CONFIG_X86_64 161 MSR_FS_BASE, 162 MSR_GS_BASE, 163 MSR_KERNEL_GS_BASE, 164 #endif 165 MSR_IA32_SYSENTER_CS, 166 MSR_IA32_SYSENTER_ESP, 167 MSR_IA32_SYSENTER_EIP, 168 MSR_CORE_C1_RES, 169 MSR_CORE_C3_RESIDENCY, 170 MSR_CORE_C6_RESIDENCY, 171 MSR_CORE_C7_RESIDENCY, 172 }; 173 174 /* 175 * These 2 parameters are used to config the controls for Pause-Loop Exiting: 176 * ple_gap: upper bound on the amount of time between two successive 177 * executions of PAUSE in a loop. Also indicate if ple enabled. 178 * According to test, this time is usually smaller than 128 cycles. 179 * ple_window: upper bound on the amount of time a guest is allowed to execute 180 * in a PAUSE loop. Tests indicate that most spinlocks are held for 181 * less than 2^12 cycles 182 * Time is measured based on a counter that runs at the same rate as the TSC, 183 * refer SDM volume 3b section 21.6.13 & 22.1.3. 184 */ 185 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP; 186 module_param(ple_gap, uint, 0444); 187 188 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; 189 module_param(ple_window, uint, 0444); 190 191 /* Default doubles per-vcpu window every exit. */ 192 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW; 193 module_param(ple_window_grow, uint, 0444); 194 195 /* Default resets per-vcpu window every exit to ple_window. */ 196 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK; 197 module_param(ple_window_shrink, uint, 0444); 198 199 /* Default is to compute the maximum so we can never overflow. 
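 *
 * Editorial sketch (not the exact helper KVM uses elsewhere): a grow step
 * that respects these bounds looks roughly like
 *
 *	new = clamp(old * ple_window_grow, ple_window, ple_window_max);
 *
 * so choosing ple_window_max no larger than UINT_MAX / ple_window_grow keeps
 * the multiplication from overflowing an unsigned int.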
*/ 200 static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; 201 module_param(ple_window_max, uint, 0444); 202 203 /* Default is SYSTEM mode, 1 for host-guest mode */ 204 int __read_mostly pt_mode = PT_MODE_SYSTEM; 205 module_param(pt_mode, int, S_IRUGO); 206 207 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); 208 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); 209 static DEFINE_MUTEX(vmx_l1d_flush_mutex); 210 211 /* Storage for pre module init parameter parsing */ 212 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO; 213 214 static const struct { 215 const char *option; 216 bool for_parse; 217 } vmentry_l1d_param[] = { 218 [VMENTER_L1D_FLUSH_AUTO] = {"auto", true}, 219 [VMENTER_L1D_FLUSH_NEVER] = {"never", true}, 220 [VMENTER_L1D_FLUSH_COND] = {"cond", true}, 221 [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true}, 222 [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false}, 223 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false}, 224 }; 225 226 #define L1D_CACHE_ORDER 4 227 static void *vmx_l1d_flush_pages; 228 229 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) 230 { 231 struct page *page; 232 unsigned int i; 233 234 if (!boot_cpu_has_bug(X86_BUG_L1TF)) { 235 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; 236 return 0; 237 } 238 239 if (!enable_ept) { 240 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED; 241 return 0; 242 } 243 244 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { 245 u64 msr; 246 247 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); 248 if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) { 249 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; 250 return 0; 251 } 252 } 253 254 /* If set to auto use the default l1tf mitigation method */ 255 if (l1tf == VMENTER_L1D_FLUSH_AUTO) { 256 switch (l1tf_mitigation) { 257 case L1TF_MITIGATION_OFF: 258 l1tf = VMENTER_L1D_FLUSH_NEVER; 259 break; 260 case L1TF_MITIGATION_FLUSH_NOWARN: 261 case L1TF_MITIGATION_FLUSH: 262 case L1TF_MITIGATION_FLUSH_NOSMT: 263 l1tf = VMENTER_L1D_FLUSH_COND; 264 break; 265 case L1TF_MITIGATION_FULL: 266 case L1TF_MITIGATION_FULL_FORCE: 267 l1tf = VMENTER_L1D_FLUSH_ALWAYS; 268 break; 269 } 270 } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) { 271 l1tf = VMENTER_L1D_FLUSH_ALWAYS; 272 } 273 274 if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages && 275 !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) { 276 /* 277 * This allocation for vmx_l1d_flush_pages is not tied to a VM 278 * lifetime and so should not be charged to a memcg. 279 */ 280 page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER); 281 if (!page) 282 return -ENOMEM; 283 vmx_l1d_flush_pages = page_address(page); 284 285 /* 286 * Initialize each page with a different pattern in 287 * order to protect against KSM in the nested 288 * virtualization case. 
289 */ 290 for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) { 291 memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1, 292 PAGE_SIZE); 293 } 294 } 295 296 l1tf_vmx_mitigation = l1tf; 297 298 if (l1tf != VMENTER_L1D_FLUSH_NEVER) 299 static_branch_enable(&vmx_l1d_should_flush); 300 else 301 static_branch_disable(&vmx_l1d_should_flush); 302 303 if (l1tf == VMENTER_L1D_FLUSH_COND) 304 static_branch_enable(&vmx_l1d_flush_cond); 305 else 306 static_branch_disable(&vmx_l1d_flush_cond); 307 return 0; 308 } 309 310 static int vmentry_l1d_flush_parse(const char *s) 311 { 312 unsigned int i; 313 314 if (s) { 315 for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) { 316 if (vmentry_l1d_param[i].for_parse && 317 sysfs_streq(s, vmentry_l1d_param[i].option)) 318 return i; 319 } 320 } 321 return -EINVAL; 322 } 323 324 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) 325 { 326 int l1tf, ret; 327 328 l1tf = vmentry_l1d_flush_parse(s); 329 if (l1tf < 0) 330 return l1tf; 331 332 if (!boot_cpu_has(X86_BUG_L1TF)) 333 return 0; 334 335 /* 336 * Has vmx_init() run already? If not then this is the pre init 337 * parameter parsing. In that case just store the value and let 338 * vmx_init() do the proper setup after enable_ept has been 339 * established. 340 */ 341 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) { 342 vmentry_l1d_flush_param = l1tf; 343 return 0; 344 } 345 346 mutex_lock(&vmx_l1d_flush_mutex); 347 ret = vmx_setup_l1d_flush(l1tf); 348 mutex_unlock(&vmx_l1d_flush_mutex); 349 return ret; 350 } 351 352 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) 353 { 354 if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param))) 355 return sprintf(s, "???\n"); 356 357 return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); 358 } 359 360 static const struct kernel_param_ops vmentry_l1d_flush_ops = { 361 .set = vmentry_l1d_flush_set, 362 .get = vmentry_l1d_flush_get, 363 }; 364 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); 365 366 static u32 vmx_segment_access_rights(struct kvm_segment *var); 367 368 void vmx_vmexit(void); 369 370 #define vmx_insn_failed(fmt...) \ 371 do { \ 372 WARN_ONCE(1, fmt); \ 373 pr_warn_ratelimited(fmt); \ 374 } while (0) 375 376 asmlinkage void vmread_error(unsigned long field, bool fault) 377 { 378 if (fault) 379 kvm_spurious_fault(); 380 else 381 vmx_insn_failed("kvm: vmread failed: field=%lx\n", field); 382 } 383 384 noinline void vmwrite_error(unsigned long field, unsigned long value) 385 { 386 vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n", 387 field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); 388 } 389 390 noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr) 391 { 392 vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr); 393 } 394 395 noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr) 396 { 397 vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr); 398 } 399 400 noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva) 401 { 402 vmx_insn_failed("kvm: invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n", 403 ext, vpid, gva); 404 } 405 406 noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa) 407 { 408 vmx_insn_failed("kvm: invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n", 409 ext, eptp, gpa); 410 } 411 412 static DEFINE_PER_CPU(struct vmcs *, vmxarea); 413 DEFINE_PER_CPU(struct vmcs *, current_vmcs); 414 /* 415 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. 
This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

struct vmcs_config vmcs_config;
struct vmx_capability vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

static unsigned long host_idt_base;

#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);

static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
	struct hv_enlightened_vmcs *evmcs;
	struct hv_partition_assist_pg **p_hv_pa_pg =
			&to_kvm_hv(vcpu->kvm)->hv_pa_pg;
	/*
	 * Synthetic VM-Exit is not enabled in current code and so all the
	 * eVMCSs in a single VM share the same assist page.
	 */
	if (!*p_hv_pa_pg)
		*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);

	if (!*p_hv_pa_pg)
		return -ENOMEM;

	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;

	evmcs->partition_assist_page =
		__pa(*p_hv_pa_pg);
	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;

	return 0;
}

#endif /* IS_ENABLED(CONFIG_HYPERV) */

/*
 * Comment's format: document - errata name - stepping - processor name.
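 * Editorial decoding note: each value below is the raw CPUID.01H:EAX
 * signature with the reserved bits cleared (see
 * cpu_has_broken_vmx_preemption_timer() below), i.e. stepping in bits 3:0,
 * model in bits 7:4, family in bits 11:8 and extended model in bits 19:16.
 * For example 0x000206E6 is family 6, model 0x2E (0x2 << 4 | 0xE),
 * stepping 6, matching the Xeon 7500-series entry that follows.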
490 * Refer from 491 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp 492 */ 493 static u32 vmx_preemption_cpu_tfms[] = { 494 /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */ 495 0x000206E6, 496 /* 323056.pdf - AAX65 - C2 - Xeon L3406 */ 497 /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */ 498 /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */ 499 0x00020652, 500 /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */ 501 0x00020655, 502 /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */ 503 /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */ 504 /* 505 * 320767.pdf - AAP86 - B1 - 506 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile 507 */ 508 0x000106E5, 509 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */ 510 0x000106A0, 511 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */ 512 0x000106A1, 513 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */ 514 0x000106A4, 515 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */ 516 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */ 517 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */ 518 0x000106A5, 519 /* Xeon E3-1220 V2 */ 520 0x000306A8, 521 }; 522 523 static inline bool cpu_has_broken_vmx_preemption_timer(void) 524 { 525 u32 eax = cpuid_eax(0x00000001), i; 526 527 /* Clear the reserved bits */ 528 eax &= ~(0x3U << 14 | 0xfU << 28); 529 for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++) 530 if (eax == vmx_preemption_cpu_tfms[i]) 531 return true; 532 533 return false; 534 } 535 536 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu) 537 { 538 return flexpriority_enabled && lapic_in_kernel(vcpu); 539 } 540 541 static inline bool report_flexpriority(void) 542 { 543 return flexpriority_enabled; 544 } 545 546 static int possible_passthrough_msr_slot(u32 msr) 547 { 548 u32 i; 549 550 for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) 551 if (vmx_possible_passthrough_msrs[i] == msr) 552 return i; 553 554 return -ENOENT; 555 } 556 557 static bool is_valid_passthrough_msr(u32 msr) 558 { 559 bool r; 560 561 switch (msr) { 562 case 0x800 ... 0x8ff: 563 /* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */ 564 return true; 565 case MSR_IA32_RTIT_STATUS: 566 case MSR_IA32_RTIT_OUTPUT_BASE: 567 case MSR_IA32_RTIT_OUTPUT_MASK: 568 case MSR_IA32_RTIT_CR3_MATCH: 569 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 570 /* PT MSRs. These are handled in pt_update_intercept_for_msr() */ 571 case MSR_LBR_SELECT: 572 case MSR_LBR_TOS: 573 case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31: 574 case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31: 575 case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31: 576 case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8: 577 case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8: 578 /* LBR MSRs. 
These are handled in vmx_update_intercept_for_lbr_msrs() */ 579 return true; 580 } 581 582 r = possible_passthrough_msr_slot(msr) != -ENOENT; 583 584 WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr); 585 586 return r; 587 } 588 589 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr) 590 { 591 int i; 592 593 i = kvm_find_user_return_msr(msr); 594 if (i >= 0) 595 return &vmx->guest_uret_msrs[i]; 596 return NULL; 597 } 598 599 static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx, 600 struct vmx_uret_msr *msr, u64 data) 601 { 602 unsigned int slot = msr - vmx->guest_uret_msrs; 603 int ret = 0; 604 605 u64 old_msr_data = msr->data; 606 msr->data = data; 607 if (msr->load_into_hardware) { 608 preempt_disable(); 609 ret = kvm_set_user_return_msr(slot, msr->data, msr->mask); 610 preempt_enable(); 611 if (ret) 612 msr->data = old_msr_data; 613 } 614 return ret; 615 } 616 617 #ifdef CONFIG_KEXEC_CORE 618 static void crash_vmclear_local_loaded_vmcss(void) 619 { 620 int cpu = raw_smp_processor_id(); 621 struct loaded_vmcs *v; 622 623 list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), 624 loaded_vmcss_on_cpu_link) 625 vmcs_clear(v->vmcs); 626 } 627 #endif /* CONFIG_KEXEC_CORE */ 628 629 static void __loaded_vmcs_clear(void *arg) 630 { 631 struct loaded_vmcs *loaded_vmcs = arg; 632 int cpu = raw_smp_processor_id(); 633 634 if (loaded_vmcs->cpu != cpu) 635 return; /* vcpu migration can race with cpu offline */ 636 if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) 637 per_cpu(current_vmcs, cpu) = NULL; 638 639 vmcs_clear(loaded_vmcs->vmcs); 640 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) 641 vmcs_clear(loaded_vmcs->shadow_vmcs); 642 643 list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); 644 645 /* 646 * Ensure all writes to loaded_vmcs, including deleting it from its 647 * current percpu list, complete before setting loaded_vmcs->vcpu to 648 * -1, otherwise a different cpu can see vcpu == -1 first and add 649 * loaded_vmcs to its percpu list before it's deleted from this cpu's 650 * list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs(). 
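 *
 * Editorial illustration of the pairing described above:
 *
 *	__loaded_vmcs_clear()		vmx_vcpu_load_vmcs()
 *	---------------------		--------------------
 *	list_del(...)			read loaded_vmcs->cpu
 *	smp_wmb()			smp_rmb()
 *	loaded_vmcs->cpu = -1		list_add(...)
 *
 * If the reader observes cpu == -1 it must also observe the list_del(),
 * so the VMCS is never on two percpu lists at once.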
651 */ 652 smp_wmb(); 653 654 loaded_vmcs->cpu = -1; 655 loaded_vmcs->launched = 0; 656 } 657 658 void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) 659 { 660 int cpu = loaded_vmcs->cpu; 661 662 if (cpu != -1) 663 smp_call_function_single(cpu, 664 __loaded_vmcs_clear, loaded_vmcs, 1); 665 } 666 667 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, 668 unsigned field) 669 { 670 bool ret; 671 u32 mask = 1 << (seg * SEG_FIELD_NR + field); 672 673 if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) { 674 kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS); 675 vmx->segment_cache.bitmask = 0; 676 } 677 ret = vmx->segment_cache.bitmask & mask; 678 vmx->segment_cache.bitmask |= mask; 679 return ret; 680 } 681 682 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) 683 { 684 u16 *p = &vmx->segment_cache.seg[seg].selector; 685 686 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) 687 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); 688 return *p; 689 } 690 691 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) 692 { 693 ulong *p = &vmx->segment_cache.seg[seg].base; 694 695 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) 696 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); 697 return *p; 698 } 699 700 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) 701 { 702 u32 *p = &vmx->segment_cache.seg[seg].limit; 703 704 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) 705 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); 706 return *p; 707 } 708 709 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) 710 { 711 u32 *p = &vmx->segment_cache.seg[seg].ar; 712 713 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) 714 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); 715 return *p; 716 } 717 718 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu) 719 { 720 u32 eb; 721 722 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | 723 (1u << DB_VECTOR) | (1u << AC_VECTOR); 724 /* 725 * Guest access to VMware backdoor ports could legitimately 726 * trigger #GP because of TSS I/O permission bitmap. 727 * We intercept those #GP and allow access to them anyway 728 * as VMware does. 729 */ 730 if (enable_vmware_backdoor) 731 eb |= (1u << GP_VECTOR); 732 if ((vcpu->guest_debug & 733 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == 734 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) 735 eb |= 1u << BP_VECTOR; 736 if (to_vmx(vcpu)->rmode.vm86_active) 737 eb = ~0; 738 if (!vmx_need_pf_intercept(vcpu)) 739 eb &= ~(1u << PF_VECTOR); 740 741 /* When we are running a nested L2 guest and L1 specified for it a 742 * certain exception bitmap, we must trap the same exceptions and pass 743 * them to L1. When running L2, we will only handle the exceptions 744 * specified above if L1 did not want them. 745 */ 746 if (is_guest_mode(vcpu)) 747 eb |= get_vmcs12(vcpu)->exception_bitmap; 748 else { 749 int mask = 0, match = 0; 750 751 if (enable_ept && (eb & (1u << PF_VECTOR))) { 752 /* 753 * If EPT is enabled, #PF is currently only intercepted 754 * if MAXPHYADDR is smaller on the guest than on the 755 * host. In that case we only care about present, 756 * non-reserved faults. For vmcs02, however, PFEC_MASK 757 * and PFEC_MATCH are set in prepare_vmcs02_rare. 
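 *
 * Editorial illustration of the PFEC_MASK/PFEC_MATCH semantics (SDM): with
 * the #PF bit set in the exception bitmap, a guest page fault triggers a
 * VM-Exit only when (error_code & MASK) == MATCH. With the values written
 * below (MASK = P|RSVD, MATCH = P):
 *
 *	error code P|W		-> VM-Exit (present, non-reserved)
 *	error code P|RSVD	-> delivered to the guest
 *	error code W (!P)	-> delivered to the guest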
758 */ 759 mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK; 760 match = PFERR_PRESENT_MASK; 761 } 762 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask); 763 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match); 764 } 765 766 vmcs_write32(EXCEPTION_BITMAP, eb); 767 } 768 769 /* 770 * Check if MSR is intercepted for currently loaded MSR bitmap. 771 */ 772 static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr) 773 { 774 if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS)) 775 return true; 776 777 return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, 778 MSR_IA32_SPEC_CTRL); 779 } 780 781 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, 782 unsigned long entry, unsigned long exit) 783 { 784 vm_entry_controls_clearbit(vmx, entry); 785 vm_exit_controls_clearbit(vmx, exit); 786 } 787 788 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr) 789 { 790 unsigned int i; 791 792 for (i = 0; i < m->nr; ++i) { 793 if (m->val[i].index == msr) 794 return i; 795 } 796 return -ENOENT; 797 } 798 799 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) 800 { 801 int i; 802 struct msr_autoload *m = &vmx->msr_autoload; 803 804 switch (msr) { 805 case MSR_EFER: 806 if (cpu_has_load_ia32_efer()) { 807 clear_atomic_switch_msr_special(vmx, 808 VM_ENTRY_LOAD_IA32_EFER, 809 VM_EXIT_LOAD_IA32_EFER); 810 return; 811 } 812 break; 813 case MSR_CORE_PERF_GLOBAL_CTRL: 814 if (cpu_has_load_perf_global_ctrl()) { 815 clear_atomic_switch_msr_special(vmx, 816 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 817 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); 818 return; 819 } 820 break; 821 } 822 i = vmx_find_loadstore_msr_slot(&m->guest, msr); 823 if (i < 0) 824 goto skip_guest; 825 --m->guest.nr; 826 m->guest.val[i] = m->guest.val[m->guest.nr]; 827 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); 828 829 skip_guest: 830 i = vmx_find_loadstore_msr_slot(&m->host, msr); 831 if (i < 0) 832 return; 833 834 --m->host.nr; 835 m->host.val[i] = m->host.val[m->host.nr]; 836 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); 837 } 838 839 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, 840 unsigned long entry, unsigned long exit, 841 unsigned long guest_val_vmcs, unsigned long host_val_vmcs, 842 u64 guest_val, u64 host_val) 843 { 844 vmcs_write64(guest_val_vmcs, guest_val); 845 if (host_val_vmcs != HOST_IA32_EFER) 846 vmcs_write64(host_val_vmcs, host_val); 847 vm_entry_controls_setbit(vmx, entry); 848 vm_exit_controls_setbit(vmx, exit); 849 } 850 851 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, 852 u64 guest_val, u64 host_val, bool entry_only) 853 { 854 int i, j = 0; 855 struct msr_autoload *m = &vmx->msr_autoload; 856 857 switch (msr) { 858 case MSR_EFER: 859 if (cpu_has_load_ia32_efer()) { 860 add_atomic_switch_msr_special(vmx, 861 VM_ENTRY_LOAD_IA32_EFER, 862 VM_EXIT_LOAD_IA32_EFER, 863 GUEST_IA32_EFER, 864 HOST_IA32_EFER, 865 guest_val, host_val); 866 return; 867 } 868 break; 869 case MSR_CORE_PERF_GLOBAL_CTRL: 870 if (cpu_has_load_perf_global_ctrl()) { 871 add_atomic_switch_msr_special(vmx, 872 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 873 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 874 GUEST_IA32_PERF_GLOBAL_CTRL, 875 HOST_IA32_PERF_GLOBAL_CTRL, 876 guest_val, host_val); 877 return; 878 } 879 break; 880 case MSR_IA32_PEBS_ENABLE: 881 /* PEBS needs a quiescent period after being disabled (to write 882 * a record). Disabling PEBS through VMX MSR swapping doesn't 883 * provide that period, so a CPU could write host's record into 884 * guest's memory. 
885 */ 886 wrmsrl(MSR_IA32_PEBS_ENABLE, 0); 887 } 888 889 i = vmx_find_loadstore_msr_slot(&m->guest, msr); 890 if (!entry_only) 891 j = vmx_find_loadstore_msr_slot(&m->host, msr); 892 893 if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) || 894 (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) { 895 printk_once(KERN_WARNING "Not enough msr switch entries. " 896 "Can't add msr %x\n", msr); 897 return; 898 } 899 if (i < 0) { 900 i = m->guest.nr++; 901 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); 902 } 903 m->guest.val[i].index = msr; 904 m->guest.val[i].value = guest_val; 905 906 if (entry_only) 907 return; 908 909 if (j < 0) { 910 j = m->host.nr++; 911 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); 912 } 913 m->host.val[j].index = msr; 914 m->host.val[j].value = host_val; 915 } 916 917 static bool update_transition_efer(struct vcpu_vmx *vmx) 918 { 919 u64 guest_efer = vmx->vcpu.arch.efer; 920 u64 ignore_bits = 0; 921 int i; 922 923 /* Shadow paging assumes NX to be available. */ 924 if (!enable_ept) 925 guest_efer |= EFER_NX; 926 927 /* 928 * LMA and LME handled by hardware; SCE meaningless outside long mode. 929 */ 930 ignore_bits |= EFER_SCE; 931 #ifdef CONFIG_X86_64 932 ignore_bits |= EFER_LMA | EFER_LME; 933 /* SCE is meaningful only in long mode on Intel */ 934 if (guest_efer & EFER_LMA) 935 ignore_bits &= ~(u64)EFER_SCE; 936 #endif 937 938 /* 939 * On EPT, we can't emulate NX, so we must switch EFER atomically. 940 * On CPUs that support "load IA32_EFER", always switch EFER 941 * atomically, since it's faster than switching it manually. 942 */ 943 if (cpu_has_load_ia32_efer() || 944 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { 945 if (!(guest_efer & EFER_LMA)) 946 guest_efer &= ~EFER_LME; 947 if (guest_efer != host_efer) 948 add_atomic_switch_msr(vmx, MSR_EFER, 949 guest_efer, host_efer, false); 950 else 951 clear_atomic_switch_msr(vmx, MSR_EFER); 952 return false; 953 } 954 955 i = kvm_find_user_return_msr(MSR_EFER); 956 if (i < 0) 957 return false; 958 959 clear_atomic_switch_msr(vmx, MSR_EFER); 960 961 guest_efer &= ~ignore_bits; 962 guest_efer |= host_efer & ignore_bits; 963 964 vmx->guest_uret_msrs[i].data = guest_efer; 965 vmx->guest_uret_msrs[i].mask = ~ignore_bits; 966 967 return true; 968 } 969 970 #ifdef CONFIG_X86_32 971 /* 972 * On 32-bit kernels, VM exits still load the FS and GS bases from the 973 * VMCS rather than the segment table. KVM uses this helper to figure 974 * out the current bases to poke them into the VMCS before entry. 975 */ 976 static unsigned long segment_base(u16 selector) 977 { 978 struct desc_struct *table; 979 unsigned long v; 980 981 if (!(selector & ~SEGMENT_RPL_MASK)) 982 return 0; 983 984 table = get_current_gdt_ro(); 985 986 if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) { 987 u16 ldt_selector = kvm_read_ldt(); 988 989 if (!(ldt_selector & ~SEGMENT_RPL_MASK)) 990 return 0; 991 992 table = (struct desc_struct *)segment_base(ldt_selector); 993 } 994 v = get_desc_base(&table[selector >> 3]); 995 return v; 996 } 997 #endif 998 999 static inline bool pt_can_write_msr(struct vcpu_vmx *vmx) 1000 { 1001 return vmx_pt_mode_is_host_guest() && 1002 !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); 1003 } 1004 1005 static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base) 1006 { 1007 /* The base must be 128-byte aligned and a legal physical address. 
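 *
 * Editorial example: 0xfe000 passes the check below (0xfe000 & 0x7f == 0),
 * while 0xfe040 fails the alignment half (0xfe040 & 0x7f == 0x40), and a
 * base with bits set at or above the guest's MAXPHYADDR fails the "legal
 * physical address" half.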
*/ 1008 return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128); 1009 } 1010 1011 static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range) 1012 { 1013 u32 i; 1014 1015 wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status); 1016 wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); 1017 wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); 1018 wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); 1019 for (i = 0; i < addr_range; i++) { 1020 wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); 1021 wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); 1022 } 1023 } 1024 1025 static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range) 1026 { 1027 u32 i; 1028 1029 rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status); 1030 rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); 1031 rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); 1032 rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); 1033 for (i = 0; i < addr_range; i++) { 1034 rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); 1035 rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); 1036 } 1037 } 1038 1039 static void pt_guest_enter(struct vcpu_vmx *vmx) 1040 { 1041 if (vmx_pt_mode_is_system()) 1042 return; 1043 1044 /* 1045 * GUEST_IA32_RTIT_CTL is already set in the VMCS. 1046 * Save host state before VM entry. 1047 */ 1048 rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); 1049 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { 1050 wrmsrl(MSR_IA32_RTIT_CTL, 0); 1051 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges); 1052 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges); 1053 } 1054 } 1055 1056 static void pt_guest_exit(struct vcpu_vmx *vmx) 1057 { 1058 if (vmx_pt_mode_is_system()) 1059 return; 1060 1061 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { 1062 pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges); 1063 pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges); 1064 } 1065 1066 /* 1067 * KVM requires VM_EXIT_CLEAR_IA32_RTIT_CTL to expose PT to the guest, 1068 * i.e. RTIT_CTL is always cleared on VM-Exit. Restore it if necessary. 1069 */ 1070 if (vmx->pt_desc.host.ctl) 1071 wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); 1072 } 1073 1074 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, 1075 unsigned long fs_base, unsigned long gs_base) 1076 { 1077 if (unlikely(fs_sel != host->fs_sel)) { 1078 if (!(fs_sel & 7)) 1079 vmcs_write16(HOST_FS_SELECTOR, fs_sel); 1080 else 1081 vmcs_write16(HOST_FS_SELECTOR, 0); 1082 host->fs_sel = fs_sel; 1083 } 1084 if (unlikely(gs_sel != host->gs_sel)) { 1085 if (!(gs_sel & 7)) 1086 vmcs_write16(HOST_GS_SELECTOR, gs_sel); 1087 else 1088 vmcs_write16(HOST_GS_SELECTOR, 0); 1089 host->gs_sel = gs_sel; 1090 } 1091 if (unlikely(fs_base != host->fs_base)) { 1092 vmcs_writel(HOST_FS_BASE, fs_base); 1093 host->fs_base = fs_base; 1094 } 1095 if (unlikely(gs_base != host->gs_base)) { 1096 vmcs_writel(HOST_GS_BASE, gs_base); 1097 host->gs_base = gs_base; 1098 } 1099 } 1100 1101 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) 1102 { 1103 struct vcpu_vmx *vmx = to_vmx(vcpu); 1104 struct vmcs_host_state *host_state; 1105 #ifdef CONFIG_X86_64 1106 int cpu = raw_smp_processor_id(); 1107 #endif 1108 unsigned long fs_base, gs_base; 1109 u16 fs_sel, gs_sel; 1110 int i; 1111 1112 vmx->req_immediate_exit = false; 1113 1114 /* 1115 * Note that guest MSRs to be saved/restored can also be changed 1116 * when guest state is loaded. This happens when guest transitions 1117 * to/from long-mode by setting MSR_EFER.LMA. 
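 *
 * Editorial example: a guest that turns on EFER.LMA and EFER.SCE makes
 * MSR_STAR/MSR_LSTAR/MSR_SYSCALL_MASK worth loading; vmx_setup_uret_msrs()
 * below then flips load_into_hardware for those entries and clears
 * guest_uret_msrs_loaded, so the loop below re-runs on the next entry here.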
1118 */ 1119 if (!vmx->guest_uret_msrs_loaded) { 1120 vmx->guest_uret_msrs_loaded = true; 1121 for (i = 0; i < kvm_nr_uret_msrs; ++i) { 1122 if (!vmx->guest_uret_msrs[i].load_into_hardware) 1123 continue; 1124 1125 kvm_set_user_return_msr(i, 1126 vmx->guest_uret_msrs[i].data, 1127 vmx->guest_uret_msrs[i].mask); 1128 } 1129 } 1130 1131 if (vmx->nested.need_vmcs12_to_shadow_sync) 1132 nested_sync_vmcs12_to_shadow(vcpu); 1133 1134 if (vmx->guest_state_loaded) 1135 return; 1136 1137 host_state = &vmx->loaded_vmcs->host_state; 1138 1139 /* 1140 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not 1141 * allow segment selectors with cpl > 0 or ti == 1. 1142 */ 1143 host_state->ldt_sel = kvm_read_ldt(); 1144 1145 #ifdef CONFIG_X86_64 1146 savesegment(ds, host_state->ds_sel); 1147 savesegment(es, host_state->es_sel); 1148 1149 gs_base = cpu_kernelmode_gs_base(cpu); 1150 if (likely(is_64bit_mm(current->mm))) { 1151 current_save_fsgs(); 1152 fs_sel = current->thread.fsindex; 1153 gs_sel = current->thread.gsindex; 1154 fs_base = current->thread.fsbase; 1155 vmx->msr_host_kernel_gs_base = current->thread.gsbase; 1156 } else { 1157 savesegment(fs, fs_sel); 1158 savesegment(gs, gs_sel); 1159 fs_base = read_msr(MSR_FS_BASE); 1160 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); 1161 } 1162 1163 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1164 #else 1165 savesegment(fs, fs_sel); 1166 savesegment(gs, gs_sel); 1167 fs_base = segment_base(fs_sel); 1168 gs_base = segment_base(gs_sel); 1169 #endif 1170 1171 vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base); 1172 vmx->guest_state_loaded = true; 1173 } 1174 1175 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) 1176 { 1177 struct vmcs_host_state *host_state; 1178 1179 if (!vmx->guest_state_loaded) 1180 return; 1181 1182 host_state = &vmx->loaded_vmcs->host_state; 1183 1184 ++vmx->vcpu.stat.host_state_reload; 1185 1186 #ifdef CONFIG_X86_64 1187 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1188 #endif 1189 if (host_state->ldt_sel || (host_state->gs_sel & 7)) { 1190 kvm_load_ldt(host_state->ldt_sel); 1191 #ifdef CONFIG_X86_64 1192 load_gs_index(host_state->gs_sel); 1193 #else 1194 loadsegment(gs, host_state->gs_sel); 1195 #endif 1196 } 1197 if (host_state->fs_sel & 7) 1198 loadsegment(fs, host_state->fs_sel); 1199 #ifdef CONFIG_X86_64 1200 if (unlikely(host_state->ds_sel | host_state->es_sel)) { 1201 loadsegment(ds, host_state->ds_sel); 1202 loadsegment(es, host_state->es_sel); 1203 } 1204 #endif 1205 invalidate_tss_limit(); 1206 #ifdef CONFIG_X86_64 1207 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); 1208 #endif 1209 load_fixmap_gdt(raw_smp_processor_id()); 1210 vmx->guest_state_loaded = false; 1211 vmx->guest_uret_msrs_loaded = false; 1212 } 1213 1214 #ifdef CONFIG_X86_64 1215 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx) 1216 { 1217 preempt_disable(); 1218 if (vmx->guest_state_loaded) 1219 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1220 preempt_enable(); 1221 return vmx->msr_guest_kernel_gs_base; 1222 } 1223 1224 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data) 1225 { 1226 preempt_disable(); 1227 if (vmx->guest_state_loaded) 1228 wrmsrl(MSR_KERNEL_GS_BASE, data); 1229 preempt_enable(); 1230 vmx->msr_guest_kernel_gs_base = data; 1231 } 1232 #endif 1233 1234 void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, 1235 struct loaded_vmcs *buddy) 1236 { 1237 struct vcpu_vmx *vmx = to_vmx(vcpu); 1238 bool already_loaded = 
vmx->loaded_vmcs->cpu == cpu;
	struct vmcs *prev;

	if (!already_loaded) {
		loaded_vmcs_clear(vmx->loaded_vmcs);
		local_irq_disable();

		/*
		 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
		 * this cpu's percpu list, otherwise it may not yet be deleted
		 * from its previous cpu's percpu list. Pairs with the
		 * smp_wmb() in __loaded_vmcs_clear().
		 */
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		local_irq_enable();
	}

	prev = per_cpu(current_vmcs, cpu);
	if (prev != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);

		/*
		 * No indirect branch prediction barrier needed when switching
		 * the active VMCS within a guest, e.g. on nested VM-Enter.
		 * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
		 */
		if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
			indirect_branch_prediction_barrier();
	}

	if (!already_loaded) {
		void *gdt = get_current_gdt_ro();
		unsigned long sysenter_esp;

		/*
		 * Flush all EPTP/VPID contexts, the new pCPU may have stale
		 * TLB entries from its previous association with the vCPU.
		 */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors. See 22.2.4.
		 */
		vmcs_writel(HOST_TR_BASE,
			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		vmx->loaded_vmcs->cpu = cpu;
	}
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
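 *
 * Editorial sketch of the expected calling pattern, in terms of the generic
 * KVM helpers rather than anything defined in this file:
 *
 *	mutex_lock(&vcpu->mutex);
 *	kvm_arch_vcpu_load(vcpu, get_cpu());	/* -> vmx_vcpu_load() */
 *	...					/* touch or run guest state */
 *	kvm_arch_vcpu_put(vcpu);		/* -> vmx_vcpu_put() */
 *	mutex_unlock(&vcpu->mutex);
 *
 * which is roughly what vcpu_load()/vcpu_put() do on the vCPU ioctl path.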
1300 */ 1301 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1302 { 1303 struct vcpu_vmx *vmx = to_vmx(vcpu); 1304 1305 vmx_vcpu_load_vmcs(vcpu, cpu, NULL); 1306 1307 vmx_vcpu_pi_load(vcpu, cpu); 1308 1309 vmx->host_debugctlmsr = get_debugctlmsr(); 1310 } 1311 1312 static void vmx_vcpu_put(struct kvm_vcpu *vcpu) 1313 { 1314 vmx_vcpu_pi_put(vcpu); 1315 1316 vmx_prepare_switch_to_host(to_vmx(vcpu)); 1317 } 1318 1319 bool vmx_emulation_required(struct kvm_vcpu *vcpu) 1320 { 1321 return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu); 1322 } 1323 1324 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) 1325 { 1326 struct vcpu_vmx *vmx = to_vmx(vcpu); 1327 unsigned long rflags, save_rflags; 1328 1329 if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) { 1330 kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS); 1331 rflags = vmcs_readl(GUEST_RFLAGS); 1332 if (vmx->rmode.vm86_active) { 1333 rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; 1334 save_rflags = vmx->rmode.save_rflags; 1335 rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; 1336 } 1337 vmx->rflags = rflags; 1338 } 1339 return vmx->rflags; 1340 } 1341 1342 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 1343 { 1344 struct vcpu_vmx *vmx = to_vmx(vcpu); 1345 unsigned long old_rflags; 1346 1347 if (is_unrestricted_guest(vcpu)) { 1348 kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS); 1349 vmx->rflags = rflags; 1350 vmcs_writel(GUEST_RFLAGS, rflags); 1351 return; 1352 } 1353 1354 old_rflags = vmx_get_rflags(vcpu); 1355 vmx->rflags = rflags; 1356 if (vmx->rmode.vm86_active) { 1357 vmx->rmode.save_rflags = rflags; 1358 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 1359 } 1360 vmcs_writel(GUEST_RFLAGS, rflags); 1361 1362 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM) 1363 vmx->emulation_required = vmx_emulation_required(vcpu); 1364 } 1365 1366 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) 1367 { 1368 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 1369 int ret = 0; 1370 1371 if (interruptibility & GUEST_INTR_STATE_STI) 1372 ret |= KVM_X86_SHADOW_INT_STI; 1373 if (interruptibility & GUEST_INTR_STATE_MOV_SS) 1374 ret |= KVM_X86_SHADOW_INT_MOV_SS; 1375 1376 return ret; 1377 } 1378 1379 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) 1380 { 1381 u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 1382 u32 interruptibility = interruptibility_old; 1383 1384 interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); 1385 1386 if (mask & KVM_X86_SHADOW_INT_MOV_SS) 1387 interruptibility |= GUEST_INTR_STATE_MOV_SS; 1388 else if (mask & KVM_X86_SHADOW_INT_STI) 1389 interruptibility |= GUEST_INTR_STATE_STI; 1390 1391 if ((interruptibility != interruptibility_old)) 1392 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); 1393 } 1394 1395 static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data) 1396 { 1397 struct vcpu_vmx *vmx = to_vmx(vcpu); 1398 unsigned long value; 1399 1400 /* 1401 * Any MSR write that attempts to change bits marked reserved will 1402 * case a #GP fault. 1403 */ 1404 if (data & vmx->pt_desc.ctl_bitmask) 1405 return 1; 1406 1407 /* 1408 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will 1409 * result in a #GP unless the same write also clears TraceEn. 
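 *
 * Editorial example of what the check below accepts: with TraceEn currently
 * set, a write that merely clears TraceEn (leaving every other bit
 * unchanged) passes; the remaining fields can then be reconfigured by a
 * second WRMSR once TraceEn is clear.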
 */
	if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
		((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
		return 1;

	/*
	 * WRMSR to IA32_RTIT_CTL that sets TraceEn but clears ToPA
	 * and FabricEn would cause #GP, if
	 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0
	 */
	if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
		!(data & RTIT_CTL_FABRIC_EN) &&
		!intel_pt_validate_cap(vmx->pt_desc.caps,
					PT_CAP_single_range_output))
		return 1;

	/*
	 * MTCFreq, CycThresh and PSBFreq encoding checks: any MSR write that
	 * utilizes encodings marked reserved will cause a #GP fault.
	 */
	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
			!test_bit((data & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET, &value))
		return 1;
	value = intel_pt_validate_cap(vmx->pt_desc.caps,
						PT_CAP_cycle_thresholds);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
			!test_bit((data & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET, &value))
		return 1;
	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
			!test_bit((data & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET, &value))
		return 1;

	/*
	 * If ADDRx_CFG is reserved or the encoding is greater than 2, the
	 * write will cause a #GP fault.
	 */
	value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
	if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2))
		return 1;
	value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
	if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2))
		return 1;
	value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
	if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2))
		return 1;
	value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
	if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2))
		return 1;

	return 0;
}

static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
{
	/*
	 * Emulation of instructions in SGX enclaves is impossible as RIP does
	 * not point at the failing instruction, and even if it did, the code
	 * stream is inaccessible. Inject #UD instead of exiting to userspace
	 * so that guest userspace can't DoS the guest simply by triggering
	 * emulation (enclaves are CPL3 only).
	 */
	if (to_vmx(vcpu)->exit_reason.enclave_mode) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return false;
	}
	return true;
}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason;
	unsigned long rip, orig_rip;
	u32 instr_len;

	/*
	 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
	 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be
	 * set when EPT misconfig occurs. In practice, real hardware updates
	 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors
	 * (namely Hyper-V) don't set it due to it being undefined behavior,
	 * i.e. we end up advancing IP with some random value.
1496 */ 1497 if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || 1498 exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) { 1499 instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 1500 1501 /* 1502 * Emulating an enclave's instructions isn't supported as KVM 1503 * cannot access the enclave's memory or its true RIP, e.g. the 1504 * vmcs.GUEST_RIP points at the exit point of the enclave, not 1505 * the RIP that actually triggered the VM-Exit. But, because 1506 * most instructions that cause VM-Exit will #UD in an enclave, 1507 * most instruction-based VM-Exits simply do not occur. 1508 * 1509 * There are a few exceptions, notably the debug instructions 1510 * INT1ICEBRK and INT3, as they are allowed in debug enclaves 1511 * and generate #DB/#BP as expected, which KVM might intercept. 1512 * But again, the CPU does the dirty work and saves an instr 1513 * length of zero so VMMs don't shoot themselves in the foot. 1514 * WARN if KVM tries to skip a non-zero length instruction on 1515 * a VM-Exit from an enclave. 1516 */ 1517 if (!instr_len) 1518 goto rip_updated; 1519 1520 WARN(exit_reason.enclave_mode, 1521 "KVM: skipping instruction after SGX enclave VM-Exit"); 1522 1523 orig_rip = kvm_rip_read(vcpu); 1524 rip = orig_rip + instr_len; 1525 #ifdef CONFIG_X86_64 1526 /* 1527 * We need to mask out the high 32 bits of RIP if not in 64-bit 1528 * mode, but just finding out that we are in 64-bit mode is 1529 * quite expensive. Only do it if there was a carry. 1530 */ 1531 if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu)) 1532 rip = (u32)rip; 1533 #endif 1534 kvm_rip_write(vcpu, rip); 1535 } else { 1536 if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP)) 1537 return 0; 1538 } 1539 1540 rip_updated: 1541 /* skipping an emulated instruction also counts */ 1542 vmx_set_interrupt_shadow(vcpu, 0); 1543 1544 return 1; 1545 } 1546 1547 /* 1548 * Recognizes a pending MTF VM-exit and records the nested state for later 1549 * delivery. 1550 */ 1551 static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu) 1552 { 1553 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1554 struct vcpu_vmx *vmx = to_vmx(vcpu); 1555 1556 if (!is_guest_mode(vcpu)) 1557 return; 1558 1559 /* 1560 * Per the SDM, MTF takes priority over debug-trap exceptions besides 1561 * T-bit traps. As instruction emulation is completed (i.e. at the 1562 * instruction boundary), any #DB exception pending delivery must be a 1563 * debug-trap. Record the pending MTF state to be delivered in 1564 * vmx_check_nested_events(). 1565 */ 1566 if (nested_cpu_has_mtf(vmcs12) && 1567 (!vcpu->arch.exception.pending || 1568 vcpu->arch.exception.nr == DB_VECTOR)) 1569 vmx->nested.mtf_pending = true; 1570 else 1571 vmx->nested.mtf_pending = false; 1572 } 1573 1574 static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu) 1575 { 1576 vmx_update_emulated_instruction(vcpu); 1577 return skip_emulated_instruction(vcpu); 1578 } 1579 1580 static void vmx_clear_hlt(struct kvm_vcpu *vcpu) 1581 { 1582 /* 1583 * Ensure that we clear the HLT state in the VMCS. We don't need to 1584 * explicitly skip the instruction because if the HLT state is set, 1585 * then the instruction is already executing and RIP has already been 1586 * advanced. 
1587 */ 1588 if (kvm_hlt_in_guest(vcpu->kvm) && 1589 vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT) 1590 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); 1591 } 1592 1593 static void vmx_queue_exception(struct kvm_vcpu *vcpu) 1594 { 1595 struct vcpu_vmx *vmx = to_vmx(vcpu); 1596 unsigned nr = vcpu->arch.exception.nr; 1597 bool has_error_code = vcpu->arch.exception.has_error_code; 1598 u32 error_code = vcpu->arch.exception.error_code; 1599 u32 intr_info = nr | INTR_INFO_VALID_MASK; 1600 1601 kvm_deliver_exception_payload(vcpu); 1602 1603 if (has_error_code) { 1604 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 1605 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 1606 } 1607 1608 if (vmx->rmode.vm86_active) { 1609 int inc_eip = 0; 1610 if (kvm_exception_is_soft(nr)) 1611 inc_eip = vcpu->arch.event_exit_inst_len; 1612 kvm_inject_realmode_interrupt(vcpu, nr, inc_eip); 1613 return; 1614 } 1615 1616 WARN_ON_ONCE(vmx->emulation_required); 1617 1618 if (kvm_exception_is_soft(nr)) { 1619 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1620 vmx->vcpu.arch.event_exit_inst_len); 1621 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 1622 } else 1623 intr_info |= INTR_TYPE_HARD_EXCEPTION; 1624 1625 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); 1626 1627 vmx_clear_hlt(vcpu); 1628 } 1629 1630 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr, 1631 bool load_into_hardware) 1632 { 1633 struct vmx_uret_msr *uret_msr; 1634 1635 uret_msr = vmx_find_uret_msr(vmx, msr); 1636 if (!uret_msr) 1637 return; 1638 1639 uret_msr->load_into_hardware = load_into_hardware; 1640 } 1641 1642 /* 1643 * Configuring user return MSRs to automatically save, load, and restore MSRs 1644 * that need to be shoved into hardware when running the guest. Note, omitting 1645 * an MSR here does _NOT_ mean it's not emulated, only that it will not be 1646 * loaded into hardware when running the guest. 1647 */ 1648 static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx) 1649 { 1650 #ifdef CONFIG_X86_64 1651 bool load_syscall_msrs; 1652 1653 /* 1654 * The SYSCALL MSRs are only needed on long mode guests, and only 1655 * when EFER.SCE is set. 1656 */ 1657 load_syscall_msrs = is_long_mode(&vmx->vcpu) && 1658 (vmx->vcpu.arch.efer & EFER_SCE); 1659 1660 vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs); 1661 vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs); 1662 vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs); 1663 #endif 1664 vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx)); 1665 1666 vmx_setup_uret_msr(vmx, MSR_TSC_AUX, 1667 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) || 1668 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID)); 1669 1670 /* 1671 * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new 1672 * kernel and old userspace. If those guests run on a tsx=off host, do 1673 * allow guests to use TSX_CTRL, but don't change the value in hardware 1674 * so that TSX remains always disabled. 1675 */ 1676 vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM)); 1677 1678 /* 1679 * The set of MSRs to load may have changed, reload MSRs before the 1680 * next VM-Enter. 
1681 */ 1682 vmx->guest_uret_msrs_loaded = false; 1683 } 1684 1685 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu) 1686 { 1687 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1688 1689 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) 1690 return vmcs12->tsc_offset; 1691 1692 return 0; 1693 } 1694 1695 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu) 1696 { 1697 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1698 1699 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) && 1700 nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING)) 1701 return vmcs12->tsc_multiplier; 1702 1703 return kvm_default_tsc_scaling_ratio; 1704 } 1705 1706 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 1707 { 1708 vmcs_write64(TSC_OFFSET, offset); 1709 } 1710 1711 static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) 1712 { 1713 vmcs_write64(TSC_MULTIPLIER, multiplier); 1714 } 1715 1716 /* 1717 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX 1718 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for 1719 * all guests if the "nested" module option is off, and can also be disabled 1720 * for a single guest by disabling its VMX cpuid bit. 1721 */ 1722 bool nested_vmx_allowed(struct kvm_vcpu *vcpu) 1723 { 1724 return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); 1725 } 1726 1727 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, 1728 uint64_t val) 1729 { 1730 uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; 1731 1732 return !(val & ~valid_bits); 1733 } 1734 1735 static int vmx_get_msr_feature(struct kvm_msr_entry *msr) 1736 { 1737 switch (msr->index) { 1738 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 1739 if (!nested) 1740 return 1; 1741 return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); 1742 case MSR_IA32_PERF_CAPABILITIES: 1743 msr->data = vmx_get_perf_capabilities(); 1744 return 0; 1745 default: 1746 return KVM_MSR_RET_INVALID; 1747 } 1748 } 1749 1750 /* 1751 * Reads an msr value (of 'msr_index') into 'pdata'. 1752 * Returns 0 on success, non-0 otherwise. 1753 * Assumes vcpu_load() was already called. 
1754 */ 1755 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1756 { 1757 struct vcpu_vmx *vmx = to_vmx(vcpu); 1758 struct vmx_uret_msr *msr; 1759 u32 index; 1760 1761 switch (msr_info->index) { 1762 #ifdef CONFIG_X86_64 1763 case MSR_FS_BASE: 1764 msr_info->data = vmcs_readl(GUEST_FS_BASE); 1765 break; 1766 case MSR_GS_BASE: 1767 msr_info->data = vmcs_readl(GUEST_GS_BASE); 1768 break; 1769 case MSR_KERNEL_GS_BASE: 1770 msr_info->data = vmx_read_guest_kernel_gs_base(vmx); 1771 break; 1772 #endif 1773 case MSR_EFER: 1774 return kvm_get_msr_common(vcpu, msr_info); 1775 case MSR_IA32_TSX_CTRL: 1776 if (!msr_info->host_initiated && 1777 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) 1778 return 1; 1779 goto find_uret_msr; 1780 case MSR_IA32_UMWAIT_CONTROL: 1781 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) 1782 return 1; 1783 1784 msr_info->data = vmx->msr_ia32_umwait_control; 1785 break; 1786 case MSR_IA32_SPEC_CTRL: 1787 if (!msr_info->host_initiated && 1788 !guest_has_spec_ctrl_msr(vcpu)) 1789 return 1; 1790 1791 msr_info->data = to_vmx(vcpu)->spec_ctrl; 1792 break; 1793 case MSR_IA32_SYSENTER_CS: 1794 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); 1795 break; 1796 case MSR_IA32_SYSENTER_EIP: 1797 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP); 1798 break; 1799 case MSR_IA32_SYSENTER_ESP: 1800 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); 1801 break; 1802 case MSR_IA32_BNDCFGS: 1803 if (!kvm_mpx_supported() || 1804 (!msr_info->host_initiated && 1805 !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) 1806 return 1; 1807 msr_info->data = vmcs_read64(GUEST_BNDCFGS); 1808 break; 1809 case MSR_IA32_MCG_EXT_CTL: 1810 if (!msr_info->host_initiated && 1811 !(vmx->msr_ia32_feature_control & 1812 FEAT_CTL_LMCE_ENABLED)) 1813 return 1; 1814 msr_info->data = vcpu->arch.mcg_ext_ctl; 1815 break; 1816 case MSR_IA32_FEAT_CTL: 1817 msr_info->data = vmx->msr_ia32_feature_control; 1818 break; 1819 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: 1820 if (!msr_info->host_initiated && 1821 !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC)) 1822 return 1; 1823 msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash 1824 [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0]; 1825 break; 1826 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 1827 if (!nested_vmx_allowed(vcpu)) 1828 return 1; 1829 if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, 1830 &msr_info->data)) 1831 return 1; 1832 /* 1833 * Enlightened VMCS v1 doesn't have certain VMCS fields but 1834 * instead of just ignoring the features, different Hyper-V 1835 * versions are either trying to use them and fail or do some 1836 * sanity checking and refuse to boot. Filter all unsupported 1837 * features out. 
1838 */ 1839 if (!msr_info->host_initiated && 1840 vmx->nested.enlightened_vmcs_enabled) 1841 nested_evmcs_filter_control_msr(msr_info->index, 1842 &msr_info->data); 1843 break; 1844 case MSR_IA32_RTIT_CTL: 1845 if (!vmx_pt_mode_is_host_guest()) 1846 return 1; 1847 msr_info->data = vmx->pt_desc.guest.ctl; 1848 break; 1849 case MSR_IA32_RTIT_STATUS: 1850 if (!vmx_pt_mode_is_host_guest()) 1851 return 1; 1852 msr_info->data = vmx->pt_desc.guest.status; 1853 break; 1854 case MSR_IA32_RTIT_CR3_MATCH: 1855 if (!vmx_pt_mode_is_host_guest() || 1856 !intel_pt_validate_cap(vmx->pt_desc.caps, 1857 PT_CAP_cr3_filtering)) 1858 return 1; 1859 msr_info->data = vmx->pt_desc.guest.cr3_match; 1860 break; 1861 case MSR_IA32_RTIT_OUTPUT_BASE: 1862 if (!vmx_pt_mode_is_host_guest() || 1863 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1864 PT_CAP_topa_output) && 1865 !intel_pt_validate_cap(vmx->pt_desc.caps, 1866 PT_CAP_single_range_output))) 1867 return 1; 1868 msr_info->data = vmx->pt_desc.guest.output_base; 1869 break; 1870 case MSR_IA32_RTIT_OUTPUT_MASK: 1871 if (!vmx_pt_mode_is_host_guest() || 1872 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1873 PT_CAP_topa_output) && 1874 !intel_pt_validate_cap(vmx->pt_desc.caps, 1875 PT_CAP_single_range_output))) 1876 return 1; 1877 msr_info->data = vmx->pt_desc.guest.output_mask; 1878 break; 1879 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 1880 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; 1881 if (!vmx_pt_mode_is_host_guest() || 1882 (index >= 2 * vmx->pt_desc.num_address_ranges)) 1883 return 1; 1884 if (index % 2) 1885 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; 1886 else 1887 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2]; 1888 break; 1889 case MSR_IA32_DEBUGCTLMSR: 1890 msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL); 1891 break; 1892 default: 1893 find_uret_msr: 1894 msr = vmx_find_uret_msr(vmx, msr_info->index); 1895 if (msr) { 1896 msr_info->data = msr->data; 1897 break; 1898 } 1899 return kvm_get_msr_common(vcpu, msr_info); 1900 } 1901 1902 return 0; 1903 } 1904 1905 static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu, 1906 u64 data) 1907 { 1908 #ifdef CONFIG_X86_64 1909 if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) 1910 return (u32)data; 1911 #endif 1912 return (unsigned long)data; 1913 } 1914 1915 static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu) 1916 { 1917 u64 debugctl = vmx_supported_debugctl(); 1918 1919 if (!intel_pmu_lbr_is_enabled(vcpu)) 1920 debugctl &= ~DEBUGCTLMSR_LBR_MASK; 1921 1922 if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)) 1923 debugctl &= ~DEBUGCTLMSR_BUS_LOCK_DETECT; 1924 1925 return debugctl; 1926 } 1927 1928 /* 1929 * Writes msr value into the appropriate "register". 1930 * Returns 0 on success, non-0 otherwise. 1931 * Assumes vcpu_load() was already called. 
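 * A non-zero return means the write was rejected, e.g. reserved bits were
 * set or the MSR isn't exposed to this guest.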
1932 */ 1933 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1934 { 1935 struct vcpu_vmx *vmx = to_vmx(vcpu); 1936 struct vmx_uret_msr *msr; 1937 int ret = 0; 1938 u32 msr_index = msr_info->index; 1939 u64 data = msr_info->data; 1940 u32 index; 1941 1942 switch (msr_index) { 1943 case MSR_EFER: 1944 ret = kvm_set_msr_common(vcpu, msr_info); 1945 break; 1946 #ifdef CONFIG_X86_64 1947 case MSR_FS_BASE: 1948 vmx_segment_cache_clear(vmx); 1949 vmcs_writel(GUEST_FS_BASE, data); 1950 break; 1951 case MSR_GS_BASE: 1952 vmx_segment_cache_clear(vmx); 1953 vmcs_writel(GUEST_GS_BASE, data); 1954 break; 1955 case MSR_KERNEL_GS_BASE: 1956 vmx_write_guest_kernel_gs_base(vmx, data); 1957 break; 1958 #endif 1959 case MSR_IA32_SYSENTER_CS: 1960 if (is_guest_mode(vcpu)) 1961 get_vmcs12(vcpu)->guest_sysenter_cs = data; 1962 vmcs_write32(GUEST_SYSENTER_CS, data); 1963 break; 1964 case MSR_IA32_SYSENTER_EIP: 1965 if (is_guest_mode(vcpu)) { 1966 data = nested_vmx_truncate_sysenter_addr(vcpu, data); 1967 get_vmcs12(vcpu)->guest_sysenter_eip = data; 1968 } 1969 vmcs_writel(GUEST_SYSENTER_EIP, data); 1970 break; 1971 case MSR_IA32_SYSENTER_ESP: 1972 if (is_guest_mode(vcpu)) { 1973 data = nested_vmx_truncate_sysenter_addr(vcpu, data); 1974 get_vmcs12(vcpu)->guest_sysenter_esp = data; 1975 } 1976 vmcs_writel(GUEST_SYSENTER_ESP, data); 1977 break; 1978 case MSR_IA32_DEBUGCTLMSR: { 1979 u64 invalid = data & ~vcpu_supported_debugctl(vcpu); 1980 if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) { 1981 if (report_ignored_msrs) 1982 vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n", 1983 __func__, data); 1984 data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); 1985 invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR); 1986 } 1987 1988 if (invalid) 1989 return 1; 1990 1991 if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls & 1992 VM_EXIT_SAVE_DEBUG_CONTROLS) 1993 get_vmcs12(vcpu)->guest_ia32_debugctl = data; 1994 1995 vmcs_write64(GUEST_IA32_DEBUGCTL, data); 1996 if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event && 1997 (data & DEBUGCTLMSR_LBR)) 1998 intel_pmu_create_guest_lbr_event(vcpu); 1999 return 0; 2000 } 2001 case MSR_IA32_BNDCFGS: 2002 if (!kvm_mpx_supported() || 2003 (!msr_info->host_initiated && 2004 !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) 2005 return 1; 2006 if (is_noncanonical_address(data & PAGE_MASK, vcpu) || 2007 (data & MSR_IA32_BNDCFGS_RSVD)) 2008 return 1; 2009 vmcs_write64(GUEST_BNDCFGS, data); 2010 break; 2011 case MSR_IA32_UMWAIT_CONTROL: 2012 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) 2013 return 1; 2014 2015 /* The reserved bit 1 and non-32 bit [63:32] should be zero */ 2016 if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) 2017 return 1; 2018 2019 vmx->msr_ia32_umwait_control = data; 2020 break; 2021 case MSR_IA32_SPEC_CTRL: 2022 if (!msr_info->host_initiated && 2023 !guest_has_spec_ctrl_msr(vcpu)) 2024 return 1; 2025 2026 if (kvm_spec_ctrl_test_value(data)) 2027 return 1; 2028 2029 vmx->spec_ctrl = data; 2030 if (!data) 2031 break; 2032 2033 /* 2034 * For non-nested: 2035 * When it's written (to non-zero) for the first time, pass 2036 * it through. 2037 * 2038 * For nested: 2039 * The handling of the MSR bitmap for L2 guests is done in 2040 * nested_vmx_prepare_msr_bitmap. We should not touch the 2041 * vmcs02.msr_bitmap here since it gets completely overwritten 2042 * in the merging. We update the vmcs01 here for L1 as well 2043 * since it will end up touching the MSR anyway now. 
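 * The intercept is dropped for both reads and writes (MSR_TYPE_RW), so all
 * subsequent IA32_SPEC_CTRL accesses by this vCPU go straight to hardware.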
2044 */ 2045 vmx_disable_intercept_for_msr(vcpu, 2046 MSR_IA32_SPEC_CTRL, 2047 MSR_TYPE_RW); 2048 break; 2049 case MSR_IA32_TSX_CTRL: 2050 if (!msr_info->host_initiated && 2051 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) 2052 return 1; 2053 if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR)) 2054 return 1; 2055 goto find_uret_msr; 2056 case MSR_IA32_PRED_CMD: 2057 if (!msr_info->host_initiated && 2058 !guest_has_pred_cmd_msr(vcpu)) 2059 return 1; 2060 2061 if (data & ~PRED_CMD_IBPB) 2062 return 1; 2063 if (!boot_cpu_has(X86_FEATURE_IBPB)) 2064 return 1; 2065 if (!data) 2066 break; 2067 2068 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); 2069 2070 /* 2071 * For non-nested: 2072 * When it's written (to non-zero) for the first time, pass 2073 * it through. 2074 * 2075 * For nested: 2076 * The handling of the MSR bitmap for L2 guests is done in 2077 * nested_vmx_prepare_msr_bitmap. We should not touch the 2078 * vmcs02.msr_bitmap here since it gets completely overwritten 2079 * in the merging. 2080 */ 2081 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W); 2082 break; 2083 case MSR_IA32_CR_PAT: 2084 if (!kvm_pat_valid(data)) 2085 return 1; 2086 2087 if (is_guest_mode(vcpu) && 2088 get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) 2089 get_vmcs12(vcpu)->guest_ia32_pat = data; 2090 2091 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 2092 vmcs_write64(GUEST_IA32_PAT, data); 2093 vcpu->arch.pat = data; 2094 break; 2095 } 2096 ret = kvm_set_msr_common(vcpu, msr_info); 2097 break; 2098 case MSR_IA32_TSC_ADJUST: 2099 ret = kvm_set_msr_common(vcpu, msr_info); 2100 break; 2101 case MSR_IA32_MCG_EXT_CTL: 2102 if ((!msr_info->host_initiated && 2103 !(to_vmx(vcpu)->msr_ia32_feature_control & 2104 FEAT_CTL_LMCE_ENABLED)) || 2105 (data & ~MCG_EXT_CTL_LMCE_EN)) 2106 return 1; 2107 vcpu->arch.mcg_ext_ctl = data; 2108 break; 2109 case MSR_IA32_FEAT_CTL: 2110 if (!vmx_feature_control_msr_valid(vcpu, data) || 2111 (to_vmx(vcpu)->msr_ia32_feature_control & 2112 FEAT_CTL_LOCKED && !msr_info->host_initiated)) 2113 return 1; 2114 vmx->msr_ia32_feature_control = data; 2115 if (msr_info->host_initiated && data == 0) 2116 vmx_leave_nested(vcpu); 2117 2118 /* SGX may be enabled/disabled by guest's firmware */ 2119 vmx_write_encls_bitmap(vcpu, NULL); 2120 break; 2121 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: 2122 /* 2123 * On real hardware, the LE hash MSRs are writable before 2124 * the firmware sets bit 0 in MSR 0x7a ("activating" SGX), 2125 * at which point SGX related bits in IA32_FEATURE_CONTROL 2126 * become writable. 2127 * 2128 * KVM does not emulate SGX activation for simplicity, so 2129 * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL 2130 * is unlocked. This is technically not architectural 2131 * behavior, but it's close enough. 2132 */ 2133 if (!msr_info->host_initiated && 2134 (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) || 2135 ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) && 2136 !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED)))) 2137 return 1; 2138 vmx->msr_ia32_sgxlepubkeyhash 2139 [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data; 2140 break; 2141 case MSR_IA32_VMX_BASIC ... 
MSR_IA32_VMX_VMFUNC: 2142 if (!msr_info->host_initiated) 2143 return 1; /* they are read-only */ 2144 if (!nested_vmx_allowed(vcpu)) 2145 return 1; 2146 return vmx_set_vmx_msr(vcpu, msr_index, data); 2147 case MSR_IA32_RTIT_CTL: 2148 if (!vmx_pt_mode_is_host_guest() || 2149 vmx_rtit_ctl_check(vcpu, data) || 2150 vmx->nested.vmxon) 2151 return 1; 2152 vmcs_write64(GUEST_IA32_RTIT_CTL, data); 2153 vmx->pt_desc.guest.ctl = data; 2154 pt_update_intercept_for_msr(vcpu); 2155 break; 2156 case MSR_IA32_RTIT_STATUS: 2157 if (!pt_can_write_msr(vmx)) 2158 return 1; 2159 if (data & MSR_IA32_RTIT_STATUS_MASK) 2160 return 1; 2161 vmx->pt_desc.guest.status = data; 2162 break; 2163 case MSR_IA32_RTIT_CR3_MATCH: 2164 if (!pt_can_write_msr(vmx)) 2165 return 1; 2166 if (!intel_pt_validate_cap(vmx->pt_desc.caps, 2167 PT_CAP_cr3_filtering)) 2168 return 1; 2169 vmx->pt_desc.guest.cr3_match = data; 2170 break; 2171 case MSR_IA32_RTIT_OUTPUT_BASE: 2172 if (!pt_can_write_msr(vmx)) 2173 return 1; 2174 if (!intel_pt_validate_cap(vmx->pt_desc.caps, 2175 PT_CAP_topa_output) && 2176 !intel_pt_validate_cap(vmx->pt_desc.caps, 2177 PT_CAP_single_range_output)) 2178 return 1; 2179 if (!pt_output_base_valid(vcpu, data)) 2180 return 1; 2181 vmx->pt_desc.guest.output_base = data; 2182 break; 2183 case MSR_IA32_RTIT_OUTPUT_MASK: 2184 if (!pt_can_write_msr(vmx)) 2185 return 1; 2186 if (!intel_pt_validate_cap(vmx->pt_desc.caps, 2187 PT_CAP_topa_output) && 2188 !intel_pt_validate_cap(vmx->pt_desc.caps, 2189 PT_CAP_single_range_output)) 2190 return 1; 2191 vmx->pt_desc.guest.output_mask = data; 2192 break; 2193 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 2194 if (!pt_can_write_msr(vmx)) 2195 return 1; 2196 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; 2197 if (index >= 2 * vmx->pt_desc.num_address_ranges) 2198 return 1; 2199 if (is_noncanonical_address(data, vcpu)) 2200 return 1; 2201 if (index % 2) 2202 vmx->pt_desc.guest.addr_b[index / 2] = data; 2203 else 2204 vmx->pt_desc.guest.addr_a[index / 2] = data; 2205 break; 2206 case MSR_IA32_PERF_CAPABILITIES: 2207 if (data && !vcpu_to_pmu(vcpu)->version) 2208 return 1; 2209 if (data & PMU_CAP_LBR_FMT) { 2210 if ((data & PMU_CAP_LBR_FMT) != 2211 (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)) 2212 return 1; 2213 if (!intel_pmu_lbr_is_compatible(vcpu)) 2214 return 1; 2215 } 2216 ret = kvm_set_msr_common(vcpu, msr_info); 2217 break; 2218 2219 default: 2220 find_uret_msr: 2221 msr = vmx_find_uret_msr(vmx, msr_index); 2222 if (msr) 2223 ret = vmx_set_guest_uret_msr(vmx, msr, data); 2224 else 2225 ret = kvm_set_msr_common(vcpu, msr_info); 2226 } 2227 2228 return ret; 2229 } 2230 2231 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) 2232 { 2233 unsigned long guest_owned_bits; 2234 2235 kvm_register_mark_available(vcpu, reg); 2236 2237 switch (reg) { 2238 case VCPU_REGS_RSP: 2239 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); 2240 break; 2241 case VCPU_REGS_RIP: 2242 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); 2243 break; 2244 case VCPU_EXREG_PDPTR: 2245 if (enable_ept) 2246 ept_save_pdptrs(vcpu); 2247 break; 2248 case VCPU_EXREG_CR0: 2249 guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; 2250 2251 vcpu->arch.cr0 &= ~guest_owned_bits; 2252 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits; 2253 break; 2254 case VCPU_EXREG_CR3: 2255 /* 2256 * When intercepting CR3 loads, e.g. for shadowing paging, KVM's 2257 * CR3 is loaded into hardware, not the guest's CR3. 
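 * vmcs.GUEST_CR3 therefore only reflects the guest's value when CR3 load
 * exiting is disabled, which is the only case in which it is read back.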
2258 */ 2259 if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING)) 2260 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 2261 break; 2262 case VCPU_EXREG_CR4: 2263 guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; 2264 2265 vcpu->arch.cr4 &= ~guest_owned_bits; 2266 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits; 2267 break; 2268 default: 2269 KVM_BUG_ON(1, vcpu->kvm); 2270 break; 2271 } 2272 } 2273 2274 static __init int cpu_has_kvm_support(void) 2275 { 2276 return cpu_has_vmx(); 2277 } 2278 2279 static __init int vmx_disabled_by_bios(void) 2280 { 2281 return !boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 2282 !boot_cpu_has(X86_FEATURE_VMX); 2283 } 2284 2285 static int kvm_cpu_vmxon(u64 vmxon_pointer) 2286 { 2287 u64 msr; 2288 2289 cr4_set_bits(X86_CR4_VMXE); 2290 2291 asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t" 2292 _ASM_EXTABLE(1b, %l[fault]) 2293 : : [vmxon_pointer] "m"(vmxon_pointer) 2294 : : fault); 2295 return 0; 2296 2297 fault: 2298 WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n", 2299 rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr); 2300 cr4_clear_bits(X86_CR4_VMXE); 2301 2302 return -EFAULT; 2303 } 2304 2305 static int hardware_enable(void) 2306 { 2307 int cpu = raw_smp_processor_id(); 2308 u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); 2309 int r; 2310 2311 if (cr4_read_shadow() & X86_CR4_VMXE) 2312 return -EBUSY; 2313 2314 /* 2315 * This can happen if we hot-added a CPU but failed to allocate 2316 * VP assist page for it. 2317 */ 2318 if (static_branch_unlikely(&enable_evmcs) && 2319 !hv_get_vp_assist_page(cpu)) 2320 return -EFAULT; 2321 2322 intel_pt_handle_vmx(1); 2323 2324 r = kvm_cpu_vmxon(phys_addr); 2325 if (r) { 2326 intel_pt_handle_vmx(0); 2327 return r; 2328 } 2329 2330 if (enable_ept) 2331 ept_sync_global(); 2332 2333 return 0; 2334 } 2335 2336 static void vmclear_local_loaded_vmcss(void) 2337 { 2338 int cpu = raw_smp_processor_id(); 2339 struct loaded_vmcs *v, *n; 2340 2341 list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), 2342 loaded_vmcss_on_cpu_link) 2343 __loaded_vmcs_clear(v); 2344 } 2345 2346 static void hardware_disable(void) 2347 { 2348 vmclear_local_loaded_vmcss(); 2349 2350 if (cpu_vmxoff()) 2351 kvm_spurious_fault(); 2352 2353 intel_pt_handle_vmx(0); 2354 } 2355 2356 /* 2357 * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID 2358 * directly instead of going through cpu_has(), to ensure KVM is trapping 2359 * ENCLS whenever it's supported in hardware. It does not matter whether 2360 * the host OS supports or has enabled SGX. 2361 */ 2362 static bool cpu_has_sgx(void) 2363 { 2364 return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0)); 2365 } 2366 2367 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, 2368 u32 msr, u32 *result) 2369 { 2370 u32 vmx_msr_low, vmx_msr_high; 2371 u32 ctl = ctl_min | ctl_opt; 2372 2373 rdmsr(msr, vmx_msr_low, vmx_msr_high); 2374 2375 ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ 2376 ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ 2377 2378 /* Ensure minimum (required) set of control bits are supported. 
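 * Optional bits the CPU cannot set are silently dropped from the result;
 * only a missing required bit makes the setup fail with -EIO.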
*/ 2379 if (ctl_min & ~ctl) 2380 return -EIO; 2381 2382 *result = ctl; 2383 return 0; 2384 } 2385 2386 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, 2387 struct vmx_capability *vmx_cap) 2388 { 2389 u32 vmx_msr_low, vmx_msr_high; 2390 u32 min, opt, min2, opt2; 2391 u32 _pin_based_exec_control = 0; 2392 u32 _cpu_based_exec_control = 0; 2393 u32 _cpu_based_2nd_exec_control = 0; 2394 u32 _vmexit_control = 0; 2395 u32 _vmentry_control = 0; 2396 2397 memset(vmcs_conf, 0, sizeof(*vmcs_conf)); 2398 min = CPU_BASED_HLT_EXITING | 2399 #ifdef CONFIG_X86_64 2400 CPU_BASED_CR8_LOAD_EXITING | 2401 CPU_BASED_CR8_STORE_EXITING | 2402 #endif 2403 CPU_BASED_CR3_LOAD_EXITING | 2404 CPU_BASED_CR3_STORE_EXITING | 2405 CPU_BASED_UNCOND_IO_EXITING | 2406 CPU_BASED_MOV_DR_EXITING | 2407 CPU_BASED_USE_TSC_OFFSETTING | 2408 CPU_BASED_MWAIT_EXITING | 2409 CPU_BASED_MONITOR_EXITING | 2410 CPU_BASED_INVLPG_EXITING | 2411 CPU_BASED_RDPMC_EXITING; 2412 2413 opt = CPU_BASED_TPR_SHADOW | 2414 CPU_BASED_USE_MSR_BITMAPS | 2415 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 2416 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, 2417 &_cpu_based_exec_control) < 0) 2418 return -EIO; 2419 #ifdef CONFIG_X86_64 2420 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) 2421 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & 2422 ~CPU_BASED_CR8_STORE_EXITING; 2423 #endif 2424 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { 2425 min2 = 0; 2426 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2427 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2428 SECONDARY_EXEC_WBINVD_EXITING | 2429 SECONDARY_EXEC_ENABLE_VPID | 2430 SECONDARY_EXEC_ENABLE_EPT | 2431 SECONDARY_EXEC_UNRESTRICTED_GUEST | 2432 SECONDARY_EXEC_PAUSE_LOOP_EXITING | 2433 SECONDARY_EXEC_DESC | 2434 SECONDARY_EXEC_ENABLE_RDTSCP | 2435 SECONDARY_EXEC_ENABLE_INVPCID | 2436 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2437 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2438 SECONDARY_EXEC_SHADOW_VMCS | 2439 SECONDARY_EXEC_XSAVES | 2440 SECONDARY_EXEC_RDSEED_EXITING | 2441 SECONDARY_EXEC_RDRAND_EXITING | 2442 SECONDARY_EXEC_ENABLE_PML | 2443 SECONDARY_EXEC_TSC_SCALING | 2444 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2445 SECONDARY_EXEC_PT_USE_GPA | 2446 SECONDARY_EXEC_PT_CONCEAL_VMX | 2447 SECONDARY_EXEC_ENABLE_VMFUNC | 2448 SECONDARY_EXEC_BUS_LOCK_DETECTION; 2449 if (cpu_has_sgx()) 2450 opt2 |= SECONDARY_EXEC_ENCLS_EXITING; 2451 if (adjust_vmx_controls(min2, opt2, 2452 MSR_IA32_VMX_PROCBASED_CTLS2, 2453 &_cpu_based_2nd_exec_control) < 0) 2454 return -EIO; 2455 } 2456 #ifndef CONFIG_X86_64 2457 if (!(_cpu_based_2nd_exec_control & 2458 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 2459 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; 2460 #endif 2461 2462 if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) 2463 _cpu_based_2nd_exec_control &= ~( 2464 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2465 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2466 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 2467 2468 rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, 2469 &vmx_cap->ept, &vmx_cap->vpid); 2470 2471 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { 2472 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT 2473 enabled */ 2474 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | 2475 CPU_BASED_CR3_STORE_EXITING | 2476 CPU_BASED_INVLPG_EXITING); 2477 } else if (vmx_cap->ept) { 2478 vmx_cap->ept = 0; 2479 pr_warn_once("EPT CAP should not exist if not support " 2480 "1-setting enable EPT VM-execution control\n"); 2481 } 2482 if (!(_cpu_based_2nd_exec_control & 
SECONDARY_EXEC_ENABLE_VPID) && 2483 vmx_cap->vpid) { 2484 vmx_cap->vpid = 0; 2485 pr_warn_once("VPID CAP should not exist if not support " 2486 "1-setting enable VPID VM-execution control\n"); 2487 } 2488 2489 min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT; 2490 #ifdef CONFIG_X86_64 2491 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; 2492 #endif 2493 opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2494 VM_EXIT_LOAD_IA32_PAT | 2495 VM_EXIT_LOAD_IA32_EFER | 2496 VM_EXIT_CLEAR_BNDCFGS | 2497 VM_EXIT_PT_CONCEAL_PIP | 2498 VM_EXIT_CLEAR_IA32_RTIT_CTL; 2499 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, 2500 &_vmexit_control) < 0) 2501 return -EIO; 2502 2503 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; 2504 opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR | 2505 PIN_BASED_VMX_PREEMPTION_TIMER; 2506 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, 2507 &_pin_based_exec_control) < 0) 2508 return -EIO; 2509 2510 if (cpu_has_broken_vmx_preemption_timer()) 2511 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 2512 if (!(_cpu_based_2nd_exec_control & 2513 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)) 2514 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; 2515 2516 min = VM_ENTRY_LOAD_DEBUG_CONTROLS; 2517 opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | 2518 VM_ENTRY_LOAD_IA32_PAT | 2519 VM_ENTRY_LOAD_IA32_EFER | 2520 VM_ENTRY_LOAD_BNDCFGS | 2521 VM_ENTRY_PT_CONCEAL_PIP | 2522 VM_ENTRY_LOAD_IA32_RTIT_CTL; 2523 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, 2524 &_vmentry_control) < 0) 2525 return -EIO; 2526 2527 /* 2528 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they 2529 * can't be used due to an errata where VM Exit may incorrectly clear 2530 * IA32_PERF_GLOBAL_CTRL[34:32]. Workaround the errata by using the 2531 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL. 2532 */ 2533 if (boot_cpu_data.x86 == 0x6) { 2534 switch (boot_cpu_data.x86_model) { 2535 case 26: /* AAK155 */ 2536 case 30: /* AAP115 */ 2537 case 37: /* AAT100 */ 2538 case 44: /* BC86,AAY89,BD102 */ 2539 case 46: /* BA97 */ 2540 _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 2541 _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 2542 pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " 2543 "does not work properly. Using workaround\n"); 2544 break; 2545 default: 2546 break; 2547 } 2548 } 2549 2550 2551 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); 2552 2553 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ 2554 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) 2555 return -EIO; 2556 2557 #ifdef CONFIG_X86_64 2558 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ 2559 if (vmx_msr_high & (1u<<16)) 2560 return -EIO; 2561 #endif 2562 2563 /* Require Write-Back (WB) memory type for VMCS accesses. 
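 * Bits 53:50 of MSR_IA32_VMX_BASIC encode the VMCS memory type; 6 is WB.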
*/ 2564 if (((vmx_msr_high >> 18) & 15) != 6) 2565 return -EIO; 2566 2567 vmcs_conf->size = vmx_msr_high & 0x1fff; 2568 vmcs_conf->order = get_order(vmcs_conf->size); 2569 vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; 2570 2571 vmcs_conf->revision_id = vmx_msr_low; 2572 2573 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; 2574 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; 2575 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; 2576 vmcs_conf->vmexit_ctrl = _vmexit_control; 2577 vmcs_conf->vmentry_ctrl = _vmentry_control; 2578 2579 #if IS_ENABLED(CONFIG_HYPERV) 2580 if (enlightened_vmcs) 2581 evmcs_sanitize_exec_ctrls(vmcs_conf); 2582 #endif 2583 2584 return 0; 2585 } 2586 2587 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) 2588 { 2589 int node = cpu_to_node(cpu); 2590 struct page *pages; 2591 struct vmcs *vmcs; 2592 2593 pages = __alloc_pages_node(node, flags, vmcs_config.order); 2594 if (!pages) 2595 return NULL; 2596 vmcs = page_address(pages); 2597 memset(vmcs, 0, vmcs_config.size); 2598 2599 /* KVM supports Enlightened VMCS v1 only */ 2600 if (static_branch_unlikely(&enable_evmcs)) 2601 vmcs->hdr.revision_id = KVM_EVMCS_VERSION; 2602 else 2603 vmcs->hdr.revision_id = vmcs_config.revision_id; 2604 2605 if (shadow) 2606 vmcs->hdr.shadow_vmcs = 1; 2607 return vmcs; 2608 } 2609 2610 void free_vmcs(struct vmcs *vmcs) 2611 { 2612 free_pages((unsigned long)vmcs, vmcs_config.order); 2613 } 2614 2615 /* 2616 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded 2617 */ 2618 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) 2619 { 2620 if (!loaded_vmcs->vmcs) 2621 return; 2622 loaded_vmcs_clear(loaded_vmcs); 2623 free_vmcs(loaded_vmcs->vmcs); 2624 loaded_vmcs->vmcs = NULL; 2625 if (loaded_vmcs->msr_bitmap) 2626 free_page((unsigned long)loaded_vmcs->msr_bitmap); 2627 WARN_ON(loaded_vmcs->shadow_vmcs != NULL); 2628 } 2629 2630 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) 2631 { 2632 loaded_vmcs->vmcs = alloc_vmcs(false); 2633 if (!loaded_vmcs->vmcs) 2634 return -ENOMEM; 2635 2636 vmcs_clear(loaded_vmcs->vmcs); 2637 2638 loaded_vmcs->shadow_vmcs = NULL; 2639 loaded_vmcs->hv_timer_soft_disabled = false; 2640 loaded_vmcs->cpu = -1; 2641 loaded_vmcs->launched = 0; 2642 2643 if (cpu_has_vmx_msr_bitmap()) { 2644 loaded_vmcs->msr_bitmap = (unsigned long *) 2645 __get_free_page(GFP_KERNEL_ACCOUNT); 2646 if (!loaded_vmcs->msr_bitmap) 2647 goto out_vmcs; 2648 memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); 2649 } 2650 2651 memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); 2652 memset(&loaded_vmcs->controls_shadow, 0, 2653 sizeof(struct vmcs_controls_shadow)); 2654 2655 return 0; 2656 2657 out_vmcs: 2658 free_loaded_vmcs(loaded_vmcs); 2659 return -ENOMEM; 2660 } 2661 2662 static void free_kvm_area(void) 2663 { 2664 int cpu; 2665 2666 for_each_possible_cpu(cpu) { 2667 free_vmcs(per_cpu(vmxarea, cpu)); 2668 per_cpu(vmxarea, cpu) = NULL; 2669 } 2670 } 2671 2672 static __init int alloc_kvm_area(void) 2673 { 2674 int cpu; 2675 2676 for_each_possible_cpu(cpu) { 2677 struct vmcs *vmcs; 2678 2679 vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL); 2680 if (!vmcs) { 2681 free_kvm_area(); 2682 return -ENOMEM; 2683 } 2684 2685 /* 2686 * When eVMCS is enabled, alloc_vmcs_cpu() sets 2687 * vmcs->revision_id to KVM_EVMCS_VERSION instead of 2688 * revision_id reported by MSR_IA32_VMX_BASIC. 
2689 * 2690 * However, even though not explicitly documented by 2691 * TLFS, VMXArea passed as VMXON argument should 2692 * still be marked with revision_id reported by 2693 * physical CPU. 2694 */ 2695 if (static_branch_unlikely(&enable_evmcs)) 2696 vmcs->hdr.revision_id = vmcs_config.revision_id; 2697 2698 per_cpu(vmxarea, cpu) = vmcs; 2699 } 2700 return 0; 2701 } 2702 2703 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, 2704 struct kvm_segment *save) 2705 { 2706 if (!emulate_invalid_guest_state) { 2707 /* 2708 * CS and SS RPL should be equal during guest entry according 2709 * to VMX spec, but in reality it is not always so. Since vcpu 2710 * is in the middle of the transition from real mode to 2711 * protected mode it is safe to assume that RPL 0 is a good 2712 * default value. 2713 */ 2714 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) 2715 save->selector &= ~SEGMENT_RPL_MASK; 2716 save->dpl = save->selector & SEGMENT_RPL_MASK; 2717 save->s = 1; 2718 } 2719 __vmx_set_segment(vcpu, save, seg); 2720 } 2721 2722 static void enter_pmode(struct kvm_vcpu *vcpu) 2723 { 2724 unsigned long flags; 2725 struct vcpu_vmx *vmx = to_vmx(vcpu); 2726 2727 /* 2728 * Update real mode segment cache. It may be not up-to-date if segment 2729 * register was written while vcpu was in a guest mode. 2730 */ 2731 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 2732 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 2733 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 2734 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 2735 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 2736 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 2737 2738 vmx->rmode.vm86_active = 0; 2739 2740 __vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 2741 2742 flags = vmcs_readl(GUEST_RFLAGS); 2743 flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; 2744 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; 2745 vmcs_writel(GUEST_RFLAGS, flags); 2746 2747 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | 2748 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); 2749 2750 vmx_update_exception_bitmap(vcpu); 2751 2752 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 2753 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 2754 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 2755 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 2756 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 2757 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 2758 } 2759 2760 static void fix_rmode_seg(int seg, struct kvm_segment *save) 2761 { 2762 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 2763 struct kvm_segment var = *save; 2764 2765 var.dpl = 0x3; 2766 if (seg == VCPU_SREG_CS) 2767 var.type = 0x3; 2768 2769 if (!emulate_invalid_guest_state) { 2770 var.selector = var.base >> 4; 2771 var.base = var.base & 0xffff0; 2772 var.limit = 0xffff; 2773 var.g = 0; 2774 var.db = 0; 2775 var.present = 1; 2776 var.s = 1; 2777 var.l = 0; 2778 var.unusable = 0; 2779 var.type = 0x3; 2780 var.avl = 0; 2781 if (save->base & 0xf) 2782 printk_once(KERN_WARNING "kvm: segment base is not " 2783 "paragraph aligned when entering " 2784 "protected mode (seg=%d)", seg); 2785 } 2786 2787 vmcs_write16(sf->selector, var.selector); 2788 vmcs_writel(sf->base, var.base); 2789 vmcs_write32(sf->limit, var.limit); 
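	/* Pack the fixed-up segment attributes into VMX access-rights format. */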
2790 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); 2791 } 2792 2793 static void enter_rmode(struct kvm_vcpu *vcpu) 2794 { 2795 unsigned long flags; 2796 struct vcpu_vmx *vmx = to_vmx(vcpu); 2797 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); 2798 2799 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 2800 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 2801 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 2802 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 2803 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 2804 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 2805 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 2806 2807 vmx->rmode.vm86_active = 1; 2808 2809 /* 2810 * Very old userspace does not call KVM_SET_TSS_ADDR before entering 2811 * vcpu. Warn the user that an update is overdue. 2812 */ 2813 if (!kvm_vmx->tss_addr) 2814 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be " 2815 "called before entering vcpu\n"); 2816 2817 vmx_segment_cache_clear(vmx); 2818 2819 vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr); 2820 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); 2821 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 2822 2823 flags = vmcs_readl(GUEST_RFLAGS); 2824 vmx->rmode.save_rflags = flags; 2825 2826 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 2827 2828 vmcs_writel(GUEST_RFLAGS, flags); 2829 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); 2830 vmx_update_exception_bitmap(vcpu); 2831 2832 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 2833 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 2834 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 2835 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 2836 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 2837 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 2838 } 2839 2840 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) 2841 { 2842 struct vcpu_vmx *vmx = to_vmx(vcpu); 2843 struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER); 2844 2845 /* Nothing to do if hardware doesn't support EFER. */ 2846 if (!msr) 2847 return 0; 2848 2849 vcpu->arch.efer = efer; 2850 if (efer & EFER_LMA) { 2851 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2852 msr->data = efer; 2853 } else { 2854 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2855 2856 msr->data = efer & ~EFER_LME; 2857 } 2858 vmx_setup_uret_msrs(vmx); 2859 return 0; 2860 } 2861 2862 #ifdef CONFIG_X86_64 2863 2864 static void enter_lmode(struct kvm_vcpu *vcpu) 2865 { 2866 u32 guest_tr_ar; 2867 2868 vmx_segment_cache_clear(to_vmx(vcpu)); 2869 2870 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); 2871 if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { 2872 pr_debug_ratelimited("%s: tss fixup for long mode. 
\n", 2873 __func__); 2874 vmcs_write32(GUEST_TR_AR_BYTES, 2875 (guest_tr_ar & ~VMX_AR_TYPE_MASK) 2876 | VMX_AR_TYPE_BUSY_64_TSS); 2877 } 2878 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); 2879 } 2880 2881 static void exit_lmode(struct kvm_vcpu *vcpu) 2882 { 2883 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2884 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); 2885 } 2886 2887 #endif 2888 2889 static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu) 2890 { 2891 struct vcpu_vmx *vmx = to_vmx(vcpu); 2892 2893 /* 2894 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as 2895 * the CPU is not required to invalidate guest-physical mappings on 2896 * VM-Entry, even if VPID is disabled. Guest-physical mappings are 2897 * associated with the root EPT structure and not any particular VPID 2898 * (INVVPID also isn't required to invalidate guest-physical mappings). 2899 */ 2900 if (enable_ept) { 2901 ept_sync_global(); 2902 } else if (enable_vpid) { 2903 if (cpu_has_vmx_invvpid_global()) { 2904 vpid_sync_vcpu_global(); 2905 } else { 2906 vpid_sync_vcpu_single(vmx->vpid); 2907 vpid_sync_vcpu_single(vmx->nested.vpid02); 2908 } 2909 } 2910 } 2911 2912 static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu) 2913 { 2914 if (is_guest_mode(vcpu)) 2915 return nested_get_vpid02(vcpu); 2916 return to_vmx(vcpu)->vpid; 2917 } 2918 2919 static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu) 2920 { 2921 struct kvm_mmu *mmu = vcpu->arch.mmu; 2922 u64 root_hpa = mmu->root_hpa; 2923 2924 /* No flush required if the current context is invalid. */ 2925 if (!VALID_PAGE(root_hpa)) 2926 return; 2927 2928 if (enable_ept) 2929 ept_sync_context(construct_eptp(vcpu, root_hpa, 2930 mmu->shadow_root_level)); 2931 else 2932 vpid_sync_context(vmx_get_current_vpid(vcpu)); 2933 } 2934 2935 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr) 2936 { 2937 /* 2938 * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in 2939 * vmx_flush_tlb_guest() for an explanation of why this is ok. 2940 */ 2941 vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr); 2942 } 2943 2944 static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu) 2945 { 2946 /* 2947 * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a 2948 * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are 2949 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is 2950 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed), 2951 * i.e. no explicit INVVPID is necessary. 
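 * When L2 is active, vmx_get_current_vpid() returns the VPID in use for the
 * nested guest, so the flush targets the TLB tag that is actually live.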
2952 */ 2953 vpid_sync_context(vmx_get_current_vpid(vcpu)); 2954 } 2955 2956 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu) 2957 { 2958 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 2959 2960 if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR)) 2961 return; 2962 2963 if (is_pae_paging(vcpu)) { 2964 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); 2965 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); 2966 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); 2967 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); 2968 } 2969 } 2970 2971 void ept_save_pdptrs(struct kvm_vcpu *vcpu) 2972 { 2973 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 2974 2975 if (WARN_ON_ONCE(!is_pae_paging(vcpu))) 2976 return; 2977 2978 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); 2979 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 2980 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); 2981 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); 2982 2983 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); 2984 } 2985 2986 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \ 2987 CPU_BASED_CR3_STORE_EXITING) 2988 2989 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 2990 { 2991 struct vcpu_vmx *vmx = to_vmx(vcpu); 2992 unsigned long hw_cr0, old_cr0_pg; 2993 u32 tmp; 2994 2995 old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG); 2996 2997 hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); 2998 if (is_unrestricted_guest(vcpu)) 2999 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; 3000 else { 3001 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; 3002 if (!enable_ept) 3003 hw_cr0 |= X86_CR0_WP; 3004 3005 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) 3006 enter_pmode(vcpu); 3007 3008 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) 3009 enter_rmode(vcpu); 3010 } 3011 3012 vmcs_writel(CR0_READ_SHADOW, cr0); 3013 vmcs_writel(GUEST_CR0, hw_cr0); 3014 vcpu->arch.cr0 = cr0; 3015 kvm_register_mark_available(vcpu, VCPU_EXREG_CR0); 3016 3017 #ifdef CONFIG_X86_64 3018 if (vcpu->arch.efer & EFER_LME) { 3019 if (!old_cr0_pg && (cr0 & X86_CR0_PG)) 3020 enter_lmode(vcpu); 3021 else if (old_cr0_pg && !(cr0 & X86_CR0_PG)) 3022 exit_lmode(vcpu); 3023 } 3024 #endif 3025 3026 if (enable_ept && !is_unrestricted_guest(vcpu)) { 3027 /* 3028 * Ensure KVM has an up-to-date snapshot of the guest's CR3. If 3029 * the below code _enables_ CR3 exiting, vmx_cache_reg() will 3030 * (correctly) stop reading vmcs.GUEST_CR3 because it thinks 3031 * KVM's CR3 is installed. 3032 */ 3033 if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) 3034 vmx_cache_reg(vcpu, VCPU_EXREG_CR3); 3035 3036 /* 3037 * When running with EPT but not unrestricted guest, KVM must 3038 * intercept CR3 accesses when paging is _disabled_. This is 3039 * necessary because restricted guests can't actually run with 3040 * paging disabled, and so KVM stuffs its own CR3 in order to 3041 * run the guest when identity mapped page tables. 3042 * 3043 * Do _NOT_ check the old CR0.PG, e.g. to optimize away the 3044 * update, it may be stale with respect to CR3 interception, 3045 * e.g. after nested VM-Enter. 3046 * 3047 * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or 3048 * stores to forward them to L1, even if KVM does not need to 3049 * intercept them to preserve its identity mapped page tables. 
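 * The three branches below implement exactly that: paging off => intercept,
 * paging on outside guest mode => don't intercept, nested => follow vmcs12.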
3050 */ 3051 if (!(cr0 & X86_CR0_PG)) { 3052 exec_controls_setbit(vmx, CR3_EXITING_BITS); 3053 } else if (!is_guest_mode(vcpu)) { 3054 exec_controls_clearbit(vmx, CR3_EXITING_BITS); 3055 } else { 3056 tmp = exec_controls_get(vmx); 3057 tmp &= ~CR3_EXITING_BITS; 3058 tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS; 3059 exec_controls_set(vmx, tmp); 3060 } 3061 3062 /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */ 3063 if ((old_cr0_pg ^ cr0) & X86_CR0_PG) 3064 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); 3065 } 3066 3067 /* depends on vcpu->arch.cr0 to be set to a new value */ 3068 vmx->emulation_required = vmx_emulation_required(vcpu); 3069 } 3070 3071 static int vmx_get_max_tdp_level(void) 3072 { 3073 if (cpu_has_vmx_ept_5levels()) 3074 return 5; 3075 return 4; 3076 } 3077 3078 u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) 3079 { 3080 u64 eptp = VMX_EPTP_MT_WB; 3081 3082 eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; 3083 3084 if (enable_ept_ad_bits && 3085 (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) 3086 eptp |= VMX_EPTP_AD_ENABLE_BIT; 3087 eptp |= root_hpa; 3088 3089 return eptp; 3090 } 3091 3092 static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, 3093 int root_level) 3094 { 3095 struct kvm *kvm = vcpu->kvm; 3096 bool update_guest_cr3 = true; 3097 unsigned long guest_cr3; 3098 u64 eptp; 3099 3100 if (enable_ept) { 3101 eptp = construct_eptp(vcpu, root_hpa, root_level); 3102 vmcs_write64(EPT_POINTER, eptp); 3103 3104 hv_track_root_tdp(vcpu, root_hpa); 3105 3106 if (!enable_unrestricted_guest && !is_paging(vcpu)) 3107 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; 3108 else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) 3109 guest_cr3 = vcpu->arch.cr3; 3110 else /* vmcs01.GUEST_CR3 is already up-to-date. */ 3111 update_guest_cr3 = false; 3112 vmx_ept_load_pdptrs(vcpu); 3113 } else { 3114 guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu); 3115 } 3116 3117 if (update_guest_cr3) 3118 vmcs_writel(GUEST_CR3, guest_cr3); 3119 } 3120 3121 static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 3122 { 3123 /* 3124 * We operate under the default treatment of SMM, so VMX cannot be 3125 * enabled under SMM. Note, whether or not VMXE is allowed at all is 3126 * handled by kvm_is_valid_cr4(). 3127 */ 3128 if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu)) 3129 return false; 3130 3131 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) 3132 return false; 3133 3134 return true; 3135 } 3136 3137 void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 3138 { 3139 unsigned long old_cr4 = vcpu->arch.cr4; 3140 struct vcpu_vmx *vmx = to_vmx(vcpu); 3141 /* 3142 * Pass through host's Machine Check Enable value to hw_cr4, which 3143 * is in force while we are in guest mode. Do not let guests control 3144 * this bit, even if host CR4.MCE == 0. 
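 * The guest still reads back its own value via CR4_READ_SHADOW; only the
 * value loaded into hardware (GUEST_CR4) has MCE forced to the host's bit.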
3145 */ 3146 unsigned long hw_cr4; 3147 3148 hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE); 3149 if (is_unrestricted_guest(vcpu)) 3150 hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST; 3151 else if (vmx->rmode.vm86_active) 3152 hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON; 3153 else 3154 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON; 3155 3156 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) { 3157 if (cr4 & X86_CR4_UMIP) { 3158 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC); 3159 hw_cr4 &= ~X86_CR4_UMIP; 3160 } else if (!is_guest_mode(vcpu) || 3161 !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) { 3162 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC); 3163 } 3164 } 3165 3166 vcpu->arch.cr4 = cr4; 3167 kvm_register_mark_available(vcpu, VCPU_EXREG_CR4); 3168 3169 if (!is_unrestricted_guest(vcpu)) { 3170 if (enable_ept) { 3171 if (!is_paging(vcpu)) { 3172 hw_cr4 &= ~X86_CR4_PAE; 3173 hw_cr4 |= X86_CR4_PSE; 3174 } else if (!(cr4 & X86_CR4_PAE)) { 3175 hw_cr4 &= ~X86_CR4_PAE; 3176 } 3177 } 3178 3179 /* 3180 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in 3181 * hardware. To emulate this behavior, SMEP/SMAP/PKU needs 3182 * to be manually disabled when guest switches to non-paging 3183 * mode. 3184 * 3185 * If !enable_unrestricted_guest, the CPU is always running 3186 * with CR0.PG=1 and CR4 needs to be modified. 3187 * If enable_unrestricted_guest, the CPU automatically 3188 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. 3189 */ 3190 if (!is_paging(vcpu)) 3191 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); 3192 } 3193 3194 vmcs_writel(CR4_READ_SHADOW, cr4); 3195 vmcs_writel(GUEST_CR4, hw_cr4); 3196 3197 if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE)) 3198 kvm_update_cpuid_runtime(vcpu); 3199 } 3200 3201 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3202 { 3203 struct vcpu_vmx *vmx = to_vmx(vcpu); 3204 u32 ar; 3205 3206 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 3207 *var = vmx->rmode.segs[seg]; 3208 if (seg == VCPU_SREG_TR 3209 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) 3210 return; 3211 var->base = vmx_read_guest_seg_base(vmx, seg); 3212 var->selector = vmx_read_guest_seg_selector(vmx, seg); 3213 return; 3214 } 3215 var->base = vmx_read_guest_seg_base(vmx, seg); 3216 var->limit = vmx_read_guest_seg_limit(vmx, seg); 3217 var->selector = vmx_read_guest_seg_selector(vmx, seg); 3218 ar = vmx_read_guest_seg_ar(vmx, seg); 3219 var->unusable = (ar >> 16) & 1; 3220 var->type = ar & 15; 3221 var->s = (ar >> 4) & 1; 3222 var->dpl = (ar >> 5) & 3; 3223 /* 3224 * Some userspaces do not preserve unusable property. Since usable 3225 * segment has to be present according to VMX spec we can use present 3226 * property to amend userspace bug by making unusable segment always 3227 * nonpresent. vmx_segment_access_rights() already marks nonpresent 3228 * segment as unusable. 
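 * In other words, "unusable" and "not present" are treated as synonyms in
 * both directions of the conversion.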
3229 */ 3230 var->present = !var->unusable; 3231 var->avl = (ar >> 12) & 1; 3232 var->l = (ar >> 13) & 1; 3233 var->db = (ar >> 14) & 1; 3234 var->g = (ar >> 15) & 1; 3235 } 3236 3237 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) 3238 { 3239 struct kvm_segment s; 3240 3241 if (to_vmx(vcpu)->rmode.vm86_active) { 3242 vmx_get_segment(vcpu, &s, seg); 3243 return s.base; 3244 } 3245 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); 3246 } 3247 3248 int vmx_get_cpl(struct kvm_vcpu *vcpu) 3249 { 3250 struct vcpu_vmx *vmx = to_vmx(vcpu); 3251 3252 if (unlikely(vmx->rmode.vm86_active)) 3253 return 0; 3254 else { 3255 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); 3256 return VMX_AR_DPL(ar); 3257 } 3258 } 3259 3260 static u32 vmx_segment_access_rights(struct kvm_segment *var) 3261 { 3262 u32 ar; 3263 3264 if (var->unusable || !var->present) 3265 ar = 1 << 16; 3266 else { 3267 ar = var->type & 15; 3268 ar |= (var->s & 1) << 4; 3269 ar |= (var->dpl & 3) << 5; 3270 ar |= (var->present & 1) << 7; 3271 ar |= (var->avl & 1) << 12; 3272 ar |= (var->l & 1) << 13; 3273 ar |= (var->db & 1) << 14; 3274 ar |= (var->g & 1) << 15; 3275 } 3276 3277 return ar; 3278 } 3279 3280 void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3281 { 3282 struct vcpu_vmx *vmx = to_vmx(vcpu); 3283 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 3284 3285 vmx_segment_cache_clear(vmx); 3286 3287 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 3288 vmx->rmode.segs[seg] = *var; 3289 if (seg == VCPU_SREG_TR) 3290 vmcs_write16(sf->selector, var->selector); 3291 else if (var->s) 3292 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); 3293 return; 3294 } 3295 3296 vmcs_writel(sf->base, var->base); 3297 vmcs_write32(sf->limit, var->limit); 3298 vmcs_write16(sf->selector, var->selector); 3299 3300 /* 3301 * Fix the "Accessed" bit in AR field of segment registers for older 3302 * qemu binaries. 3303 * IA32 arch specifies that at the time of processor reset the 3304 * "Accessed" bit in the AR field of segment registers is 1. And qemu 3305 * is setting it to 0 in the userland code. This causes invalid guest 3306 * state vmexit when "unrestricted guest" mode is turned on. 3307 * Fix for this setup issue in cpu_reset is being pushed in the qemu 3308 * tree. Newer qemu binaries with that qemu fix would not need this 3309 * kvm hack. 
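 * Note that LDTR is deliberately excluded from the fixup below.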
3310 */ 3311 if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR)) 3312 var->type |= 0x1; /* Accessed */ 3313 3314 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); 3315 } 3316 3317 static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3318 { 3319 __vmx_set_segment(vcpu, var, seg); 3320 3321 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu); 3322 } 3323 3324 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 3325 { 3326 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); 3327 3328 *db = (ar >> 14) & 1; 3329 *l = (ar >> 13) & 1; 3330 } 3331 3332 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3333 { 3334 dt->size = vmcs_read32(GUEST_IDTR_LIMIT); 3335 dt->address = vmcs_readl(GUEST_IDTR_BASE); 3336 } 3337 3338 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3339 { 3340 vmcs_write32(GUEST_IDTR_LIMIT, dt->size); 3341 vmcs_writel(GUEST_IDTR_BASE, dt->address); 3342 } 3343 3344 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3345 { 3346 dt->size = vmcs_read32(GUEST_GDTR_LIMIT); 3347 dt->address = vmcs_readl(GUEST_GDTR_BASE); 3348 } 3349 3350 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3351 { 3352 vmcs_write32(GUEST_GDTR_LIMIT, dt->size); 3353 vmcs_writel(GUEST_GDTR_BASE, dt->address); 3354 } 3355 3356 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) 3357 { 3358 struct kvm_segment var; 3359 u32 ar; 3360 3361 vmx_get_segment(vcpu, &var, seg); 3362 var.dpl = 0x3; 3363 if (seg == VCPU_SREG_CS) 3364 var.type = 0x3; 3365 ar = vmx_segment_access_rights(&var); 3366 3367 if (var.base != (var.selector << 4)) 3368 return false; 3369 if (var.limit != 0xffff) 3370 return false; 3371 if (ar != 0xf3) 3372 return false; 3373 3374 return true; 3375 } 3376 3377 static bool code_segment_valid(struct kvm_vcpu *vcpu) 3378 { 3379 struct kvm_segment cs; 3380 unsigned int cs_rpl; 3381 3382 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 3383 cs_rpl = cs.selector & SEGMENT_RPL_MASK; 3384 3385 if (cs.unusable) 3386 return false; 3387 if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) 3388 return false; 3389 if (!cs.s) 3390 return false; 3391 if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { 3392 if (cs.dpl > cs_rpl) 3393 return false; 3394 } else { 3395 if (cs.dpl != cs_rpl) 3396 return false; 3397 } 3398 if (!cs.present) 3399 return false; 3400 3401 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ 3402 return true; 3403 } 3404 3405 static bool stack_segment_valid(struct kvm_vcpu *vcpu) 3406 { 3407 struct kvm_segment ss; 3408 unsigned int ss_rpl; 3409 3410 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 3411 ss_rpl = ss.selector & SEGMENT_RPL_MASK; 3412 3413 if (ss.unusable) 3414 return true; 3415 if (ss.type != 3 && ss.type != 7) 3416 return false; 3417 if (!ss.s) 3418 return false; 3419 if (ss.dpl != ss_rpl) /* DPL != RPL */ 3420 return false; 3421 if (!ss.present) 3422 return false; 3423 3424 return true; 3425 } 3426 3427 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) 3428 { 3429 struct kvm_segment var; 3430 unsigned int rpl; 3431 3432 vmx_get_segment(vcpu, &var, seg); 3433 rpl = var.selector & SEGMENT_RPL_MASK; 3434 3435 if (var.unusable) 3436 return true; 3437 if (!var.s) 3438 return false; 3439 if (!var.present) 3440 return false; 3441 if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { 3442 if (var.dpl < rpl) /* DPL < RPL */ 3443 return false; 
3444 } 3445 3446 /* TODO: Add other members to kvm_segment_field to allow checking for other access 3447 * rights flags 3448 */ 3449 return true; 3450 } 3451 3452 static bool tr_valid(struct kvm_vcpu *vcpu) 3453 { 3454 struct kvm_segment tr; 3455 3456 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); 3457 3458 if (tr.unusable) 3459 return false; 3460 if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 3461 return false; 3462 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ 3463 return false; 3464 if (!tr.present) 3465 return false; 3466 3467 return true; 3468 } 3469 3470 static bool ldtr_valid(struct kvm_vcpu *vcpu) 3471 { 3472 struct kvm_segment ldtr; 3473 3474 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); 3475 3476 if (ldtr.unusable) 3477 return true; 3478 if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 3479 return false; 3480 if (ldtr.type != 2) 3481 return false; 3482 if (!ldtr.present) 3483 return false; 3484 3485 return true; 3486 } 3487 3488 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) 3489 { 3490 struct kvm_segment cs, ss; 3491 3492 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 3493 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 3494 3495 return ((cs.selector & SEGMENT_RPL_MASK) == 3496 (ss.selector & SEGMENT_RPL_MASK)); 3497 } 3498 3499 /* 3500 * Check if guest state is valid. Returns true if valid, false if 3501 * not. 3502 * We assume that registers are always usable 3503 */ 3504 bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu) 3505 { 3506 /* real mode guest state checks */ 3507 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { 3508 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) 3509 return false; 3510 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) 3511 return false; 3512 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) 3513 return false; 3514 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) 3515 return false; 3516 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) 3517 return false; 3518 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) 3519 return false; 3520 } else { 3521 /* protected mode guest state checks */ 3522 if (!cs_ss_rpl_check(vcpu)) 3523 return false; 3524 if (!code_segment_valid(vcpu)) 3525 return false; 3526 if (!stack_segment_valid(vcpu)) 3527 return false; 3528 if (!data_segment_valid(vcpu, VCPU_SREG_DS)) 3529 return false; 3530 if (!data_segment_valid(vcpu, VCPU_SREG_ES)) 3531 return false; 3532 if (!data_segment_valid(vcpu, VCPU_SREG_FS)) 3533 return false; 3534 if (!data_segment_valid(vcpu, VCPU_SREG_GS)) 3535 return false; 3536 if (!tr_valid(vcpu)) 3537 return false; 3538 if (!ldtr_valid(vcpu)) 3539 return false; 3540 } 3541 /* TODO: 3542 * - Add checks on RIP 3543 * - Add checks on RFLAGS 3544 */ 3545 3546 return true; 3547 } 3548 3549 static int init_rmode_tss(struct kvm *kvm, void __user *ua) 3550 { 3551 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 3552 u16 data; 3553 int i; 3554 3555 for (i = 0; i < 3; i++) { 3556 if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE)) 3557 return -EFAULT; 3558 } 3559 3560 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; 3561 if (__copy_to_user(ua + TSS_IOPB_BASE_OFFSET, &data, sizeof(u16))) 3562 return -EFAULT; 3563 3564 data = ~0; 3565 if (__copy_to_user(ua + RMODE_TSS_SIZE - 1, &data, sizeof(u8))) 3566 return -EFAULT; 3567 3568 return 0; 3569 } 3570 3571 static int init_rmode_identity_map(struct kvm *kvm) 3572 { 3573 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm); 3574 int i, r = 0; 3575 void __user *uaddr; 3576 u32 tmp; 3577 3578 /* Protect 
kvm_vmx->ept_identity_pagetable_done. */ 3579 mutex_lock(&kvm->slots_lock); 3580 3581 if (likely(kvm_vmx->ept_identity_pagetable_done)) 3582 goto out; 3583 3584 if (!kvm_vmx->ept_identity_map_addr) 3585 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; 3586 3587 uaddr = __x86_set_memory_region(kvm, 3588 IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 3589 kvm_vmx->ept_identity_map_addr, 3590 PAGE_SIZE); 3591 if (IS_ERR(uaddr)) { 3592 r = PTR_ERR(uaddr); 3593 goto out; 3594 } 3595 3596 /* Set up identity-mapping pagetable for EPT in real mode */ 3597 for (i = 0; i < PT32_ENT_PER_PAGE; i++) { 3598 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | 3599 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); 3600 if (__copy_to_user(uaddr + i * sizeof(tmp), &tmp, sizeof(tmp))) { 3601 r = -EFAULT; 3602 goto out; 3603 } 3604 } 3605 kvm_vmx->ept_identity_pagetable_done = true; 3606 3607 out: 3608 mutex_unlock(&kvm->slots_lock); 3609 return r; 3610 } 3611 3612 static void seg_setup(int seg) 3613 { 3614 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 3615 unsigned int ar; 3616 3617 vmcs_write16(sf->selector, 0); 3618 vmcs_writel(sf->base, 0); 3619 vmcs_write32(sf->limit, 0xffff); 3620 ar = 0x93; 3621 if (seg == VCPU_SREG_CS) 3622 ar |= 0x08; /* code segment */ 3623 3624 vmcs_write32(sf->ar_bytes, ar); 3625 } 3626 3627 static int alloc_apic_access_page(struct kvm *kvm) 3628 { 3629 struct page *page; 3630 void __user *hva; 3631 int ret = 0; 3632 3633 mutex_lock(&kvm->slots_lock); 3634 if (kvm->arch.apic_access_memslot_enabled) 3635 goto out; 3636 hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 3637 APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); 3638 if (IS_ERR(hva)) { 3639 ret = PTR_ERR(hva); 3640 goto out; 3641 } 3642 3643 page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 3644 if (is_error_page(page)) { 3645 ret = -EFAULT; 3646 goto out; 3647 } 3648 3649 /* 3650 * Do not pin the page in memory, so that memory hot-unplug 3651 * is able to migrate it. 3652 */ 3653 put_page(page); 3654 kvm->arch.apic_access_memslot_enabled = true; 3655 out: 3656 mutex_unlock(&kvm->slots_lock); 3657 return ret; 3658 } 3659 3660 int allocate_vpid(void) 3661 { 3662 int vpid; 3663 3664 if (!enable_vpid) 3665 return 0; 3666 spin_lock(&vmx_vpid_lock); 3667 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); 3668 if (vpid < VMX_NR_VPIDS) 3669 __set_bit(vpid, vmx_vpid_bitmap); 3670 else 3671 vpid = 0; 3672 spin_unlock(&vmx_vpid_lock); 3673 return vpid; 3674 } 3675 3676 void free_vpid(int vpid) 3677 { 3678 if (!enable_vpid || vpid == 0) 3679 return; 3680 spin_lock(&vmx_vpid_lock); 3681 __clear_bit(vpid, vmx_vpid_bitmap); 3682 spin_unlock(&vmx_vpid_lock); 3683 } 3684 3685 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) 3686 { 3687 struct vcpu_vmx *vmx = to_vmx(vcpu); 3688 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; 3689 3690 if (!cpu_has_vmx_msr_bitmap()) 3691 return; 3692 3693 if (static_branch_unlikely(&enable_evmcs)) 3694 evmcs_touch_msr_bitmap(); 3695 3696 /* 3697 * Mark the desired intercept state in shadow bitmap, this is needed 3698 * for resync when the MSR filters change. 
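 * Only MSRs in vmx_possible_passthrough_msrs[] have a slot in the shadow
 * bitmap; x2APIC and PT MSRs are handled specially and have no slot.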
3699 */ 3700 if (is_valid_passthrough_msr(msr)) { 3701 int idx = possible_passthrough_msr_slot(msr); 3702 3703 if (idx != -ENOENT) { 3704 if (type & MSR_TYPE_R) 3705 clear_bit(idx, vmx->shadow_msr_intercept.read); 3706 if (type & MSR_TYPE_W) 3707 clear_bit(idx, vmx->shadow_msr_intercept.write); 3708 } 3709 } 3710 3711 if ((type & MSR_TYPE_R) && 3712 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) { 3713 vmx_set_msr_bitmap_read(msr_bitmap, msr); 3714 type &= ~MSR_TYPE_R; 3715 } 3716 3717 if ((type & MSR_TYPE_W) && 3718 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) { 3719 vmx_set_msr_bitmap_write(msr_bitmap, msr); 3720 type &= ~MSR_TYPE_W; 3721 } 3722 3723 if (type & MSR_TYPE_R) 3724 vmx_clear_msr_bitmap_read(msr_bitmap, msr); 3725 3726 if (type & MSR_TYPE_W) 3727 vmx_clear_msr_bitmap_write(msr_bitmap, msr); 3728 } 3729 3730 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) 3731 { 3732 struct vcpu_vmx *vmx = to_vmx(vcpu); 3733 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; 3734 3735 if (!cpu_has_vmx_msr_bitmap()) 3736 return; 3737 3738 if (static_branch_unlikely(&enable_evmcs)) 3739 evmcs_touch_msr_bitmap(); 3740 3741 /* 3742 * Mark the desired intercept state in shadow bitmap, this is needed 3743 * for resync when the MSR filter changes. 3744 */ 3745 if (is_valid_passthrough_msr(msr)) { 3746 int idx = possible_passthrough_msr_slot(msr); 3747 3748 if (idx != -ENOENT) { 3749 if (type & MSR_TYPE_R) 3750 set_bit(idx, vmx->shadow_msr_intercept.read); 3751 if (type & MSR_TYPE_W) 3752 set_bit(idx, vmx->shadow_msr_intercept.write); 3753 } 3754 } 3755 3756 if (type & MSR_TYPE_R) 3757 vmx_set_msr_bitmap_read(msr_bitmap, msr); 3758 3759 if (type & MSR_TYPE_W) 3760 vmx_set_msr_bitmap_write(msr_bitmap, msr); 3761 } 3762 3763 static void vmx_reset_x2apic_msrs(struct kvm_vcpu *vcpu, u8 mode) 3764 { 3765 unsigned long *msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; 3766 unsigned long read_intercept; 3767 int msr; 3768 3769 read_intercept = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0; 3770 3771 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 3772 unsigned int read_idx = msr / BITS_PER_LONG; 3773 unsigned int write_idx = read_idx + (0x800 / sizeof(long)); 3774 3775 msr_bitmap[read_idx] = read_intercept; 3776 msr_bitmap[write_idx] = ~0ul; 3777 } 3778 } 3779 3780 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu) 3781 { 3782 struct vcpu_vmx *vmx = to_vmx(vcpu); 3783 u8 mode; 3784 3785 if (!cpu_has_vmx_msr_bitmap()) 3786 return; 3787 3788 if (cpu_has_secondary_exec_ctrls() && 3789 (secondary_exec_controls_get(vmx) & 3790 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { 3791 mode = MSR_BITMAP_MODE_X2APIC; 3792 if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) 3793 mode |= MSR_BITMAP_MODE_X2APIC_APICV; 3794 } else { 3795 mode = 0; 3796 } 3797 3798 if (mode == vmx->x2apic_msr_bitmap_mode) 3799 return; 3800 3801 vmx->x2apic_msr_bitmap_mode = mode; 3802 3803 vmx_reset_x2apic_msrs(vcpu, mode); 3804 3805 /* 3806 * TPR reads and writes can be virtualized even if virtual interrupt 3807 * delivery is not in use. 
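 * Hence TPR only depends on MSR_BITMAP_MODE_X2APIC, while EOI and SELF_IPI
 * additionally require MSR_BITMAP_MODE_X2APIC_APICV.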
 */
	vmx_set_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW,
				  !(mode & MSR_BITMAP_MODE_X2APIC));

	if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
		vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
	}
}

void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
	u32 i;

	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
	for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) {
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
	}
}

static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	void *vapic_page;
	u32 vppr;
	int rvi;

	if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
	    !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
	    WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
		return false;

	rvi = vmx_get_rvi();

	vapic_page = vmx->nested.virtual_apic_map.hva;
	vppr = *((u32 *)(vapic_page + APIC_PROCPRI));

	return ((rvi & 0xf0) > (vppr & 0xf0));
}

static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 i;

	/*
	 * Set intercept permissions for all potentially passed through MSRs
	 * again. They will automatically get filtered through the MSR filter,
	 * so we are back in sync after this.
	 */
	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
		u32 msr = vmx_possible_passthrough_msrs[i];
		bool read = test_bit(i, vmx->shadow_msr_intercept.read);
		bool write = test_bit(i, vmx->shadow_msr_intercept.write);

		vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_R, read);
		vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_W, write);
	}

	pt_update_intercept_for_msr(vcpu);
}

static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
						     bool nested)
{
#ifdef CONFIG_SMP
	int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;

	if (vcpu->mode == IN_GUEST_MODE) {
		/*
		 * The vector of the interrupt to be delivered to the vcpu has
		 * already been set in the PIR before this function is called.
		 *
		 * The following cases can be reached in this block, and a
		 * notification event is sent in all of them, as explained
		 * below.
		 *
		 * Case 1: vcpu stays in non-root mode. Sending a
		 * notification event posts the interrupt to the vcpu.
		 *
		 * Case 2: vcpu exits to root mode and is still
		 * runnable. The PIR will be synced to the vIRR before the
		 * next vcpu entry. Sending a notification event in
		 * this case has no effect, as the vcpu is no longer in
		 * non-root mode.
		 *
		 * Case 3: vcpu exits to root mode and is blocked.
		 * vcpu_block() has already synced PIR to vIRR and
		 * never blocks the vcpu if the vIRR is not cleared.
Therefore, 3904 * a blocked vcpu here does not wait for any requested 3905 * interrupts in PIR, and sending a notification event 3906 * which has no effect is safe here. 3907 */ 3908 3909 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); 3910 return true; 3911 } 3912 #endif 3913 return false; 3914 } 3915 3916 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, 3917 int vector) 3918 { 3919 struct vcpu_vmx *vmx = to_vmx(vcpu); 3920 3921 if (is_guest_mode(vcpu) && 3922 vector == vmx->nested.posted_intr_nv) { 3923 /* 3924 * If a posted intr is not recognized by hardware, 3925 * we will accomplish it in the next vmentry. 3926 */ 3927 vmx->nested.pi_pending = true; 3928 kvm_make_request(KVM_REQ_EVENT, vcpu); 3929 /* the PIR and ON have been set by L1. */ 3930 if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) 3931 kvm_vcpu_kick(vcpu); 3932 return 0; 3933 } 3934 return -1; 3935 } 3936 /* 3937 * Send interrupt to vcpu via posted interrupt way. 3938 * 1. If target vcpu is running(non-root mode), send posted interrupt 3939 * notification to vcpu and hardware will sync PIR to vIRR atomically. 3940 * 2. If target vcpu isn't running(root mode), kick it to pick up the 3941 * interrupt from PIR in next vmentry. 3942 */ 3943 static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) 3944 { 3945 struct vcpu_vmx *vmx = to_vmx(vcpu); 3946 int r; 3947 3948 r = vmx_deliver_nested_posted_interrupt(vcpu, vector); 3949 if (!r) 3950 return 0; 3951 3952 if (!vcpu->arch.apicv_active) 3953 return -1; 3954 3955 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) 3956 return 0; 3957 3958 /* If a previous notification has sent the IPI, nothing to do. */ 3959 if (pi_test_and_set_on(&vmx->pi_desc)) 3960 return 0; 3961 3962 if (vcpu != kvm_get_running_vcpu() && 3963 !kvm_vcpu_trigger_posted_interrupt(vcpu, false)) 3964 kvm_vcpu_kick(vcpu); 3965 3966 return 0; 3967 } 3968 3969 /* 3970 * Set up the vmcs's constant host-state fields, i.e., host-state fields that 3971 * will not change in the lifetime of the guest. 3972 * Note that host-state that does change is set elsewhere. E.g., host-state 3973 * that is set differently for each CPU is set in vmx_vcpu_load(), not here. 3974 */ 3975 void vmx_set_constant_host_state(struct vcpu_vmx *vmx) 3976 { 3977 u32 low32, high32; 3978 unsigned long tmpl; 3979 unsigned long cr0, cr3, cr4; 3980 3981 cr0 = read_cr0(); 3982 WARN_ON(cr0 & X86_CR0_TS); 3983 vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */ 3984 3985 /* 3986 * Save the most likely value for this task's CR3 in the VMCS. 3987 * We can't use __get_current_cr3_fast() because we're not atomic. 3988 */ 3989 cr3 = __read_cr3(); 3990 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ 3991 vmx->loaded_vmcs->host_state.cr3 = cr3; 3992 3993 /* Save the most likely value for this task's CR4 in the VMCS. */ 3994 cr4 = cr4_read_shadow(); 3995 vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ 3996 vmx->loaded_vmcs->host_state.cr4 = cr4; 3997 3998 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ 3999 #ifdef CONFIG_X86_64 4000 /* 4001 * Load null selectors, so we can avoid reloading them in 4002 * vmx_prepare_switch_to_host(), in case userspace uses 4003 * the null selectors too (the expected case). 
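	 * (In 64-bit mode the CPU ignores the DS and ES segment bases, so
	 * running the host with null DS/ES selectors is architecturally fine.)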
4004 */ 4005 vmcs_write16(HOST_DS_SELECTOR, 0); 4006 vmcs_write16(HOST_ES_SELECTOR, 0); 4007 #else 4008 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4009 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4010 #endif 4011 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 4012 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ 4013 4014 vmcs_writel(HOST_IDTR_BASE, host_idt_base); /* 22.2.4 */ 4015 4016 vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */ 4017 4018 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); 4019 vmcs_write32(HOST_IA32_SYSENTER_CS, low32); 4020 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); 4021 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ 4022 4023 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { 4024 rdmsr(MSR_IA32_CR_PAT, low32, high32); 4025 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); 4026 } 4027 4028 if (cpu_has_load_ia32_efer()) 4029 vmcs_write64(HOST_IA32_EFER, host_efer); 4030 } 4031 4032 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) 4033 { 4034 struct kvm_vcpu *vcpu = &vmx->vcpu; 4035 4036 vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS & 4037 ~vcpu->arch.cr4_guest_rsvd_bits; 4038 if (!enable_ept) 4039 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PGE; 4040 if (is_guest_mode(&vmx->vcpu)) 4041 vcpu->arch.cr4_guest_owned_bits &= 4042 ~get_vmcs12(vcpu)->cr4_guest_host_mask; 4043 vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits); 4044 } 4045 4046 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) 4047 { 4048 u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; 4049 4050 if (!kvm_vcpu_apicv_active(&vmx->vcpu)) 4051 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; 4052 4053 if (!enable_vnmi) 4054 pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS; 4055 4056 if (!enable_preemption_timer) 4057 pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 4058 4059 return pin_based_exec_ctrl; 4060 } 4061 4062 static u32 vmx_vmentry_ctrl(void) 4063 { 4064 u32 vmentry_ctrl = vmcs_config.vmentry_ctrl; 4065 4066 if (vmx_pt_mode_is_system()) 4067 vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP | 4068 VM_ENTRY_LOAD_IA32_RTIT_CTL); 4069 /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */ 4070 return vmentry_ctrl & 4071 ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER); 4072 } 4073 4074 static u32 vmx_vmexit_ctrl(void) 4075 { 4076 u32 vmexit_ctrl = vmcs_config.vmexit_ctrl; 4077 4078 if (vmx_pt_mode_is_system()) 4079 vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP | 4080 VM_EXIT_CLEAR_IA32_RTIT_CTL); 4081 /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */ 4082 return vmexit_ctrl & 4083 ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER); 4084 } 4085 4086 static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) 4087 { 4088 struct vcpu_vmx *vmx = to_vmx(vcpu); 4089 4090 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); 4091 if (cpu_has_secondary_exec_ctrls()) { 4092 if (kvm_vcpu_apicv_active(vcpu)) 4093 secondary_exec_controls_setbit(vmx, 4094 SECONDARY_EXEC_APIC_REGISTER_VIRT | 4095 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4096 else 4097 secondary_exec_controls_clearbit(vmx, 4098 SECONDARY_EXEC_APIC_REGISTER_VIRT | 4099 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4100 } 4101 4102 vmx_update_msr_bitmap_x2apic(vcpu); 4103 } 4104 4105 static u32 vmx_exec_control(struct vcpu_vmx *vmx) 4106 { 4107 u32 exec_control = vmcs_config.cpu_based_exec_ctrl; 4108 4109 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) 4110 exec_control &= 
				~CPU_BASED_MOV_DR_EXITING;

	if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
		exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
		exec_control |= CPU_BASED_CR8_STORE_EXITING |
				CPU_BASED_CR8_LOAD_EXITING;
#endif
	}
	if (!enable_ept)
		exec_control |= CPU_BASED_CR3_STORE_EXITING |
				CPU_BASED_CR3_LOAD_EXITING |
				CPU_BASED_INVLPG_EXITING;
	if (kvm_mwait_in_guest(vmx->vcpu.kvm))
		exec_control &= ~(CPU_BASED_MWAIT_EXITING |
				  CPU_BASED_MONITOR_EXITING);
	if (kvm_hlt_in_guest(vmx->vcpu.kvm))
		exec_control &= ~CPU_BASED_HLT_EXITING;
	return exec_control;
}

/*
 * Adjust a single secondary execution control bit to intercept/allow an
 * instruction in the guest. This is usually done based on whether or not a
 * feature has been exposed to the guest in order to correctly emulate faults.
 */
static inline void
vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
				  u32 control, bool enabled, bool exiting)
{
	/*
	 * If the control is for an opt-in feature, clear the control if the
	 * feature is not exposed to the guest, i.e. not enabled. If the
	 * control is opt-out, i.e. an exiting control, clear the control if
	 * the feature _is_ exposed to the guest, i.e. exiting/interception is
	 * disabled for the associated instruction. Note, the caller is
	 * responsible for presetting exec_control to set all supported bits.
	 */
	if (enabled == exiting)
		*exec_control &= ~control;

	/*
	 * Update the nested MSR settings so that a nested VMM can/can't set
	 * controls for features that are/aren't exposed to the guest.
	 */
	if (nested) {
		if (enabled)
			vmx->nested.msrs.secondary_ctls_high |= control;
		else
			vmx->nested.msrs.secondary_ctls_high &= ~control;
	}
}

/*
 * Wrapper macro for the common case of adjusting a secondary execution control
 * based on a single guest CPUID bit, with a dedicated feature bit. This also
 * verifies that the control is actually supported by KVM and hardware.
 */
#define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
({										\
	bool __enabled;								\
										\
	if (cpu_has_vmx_##name()) {						\
		__enabled = guest_cpuid_has(&(vmx)->vcpu,			\
					    X86_FEATURE_##feat_name);		\
		vmx_adjust_secondary_exec_control(vmx, exec_control,		\
				SECONDARY_EXEC_##ctrl_name, __enabled, exiting); \
	}									\
})

/*
 * More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls.
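 * For example, vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID)
 * checks cpu_has_vmx_invpcid() and X86_FEATURE_INVPCID against
 * SECONDARY_EXEC_ENABLE_INVPCID (opt-in, exiting == false), whereas
 * vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND) pairs
 * X86_FEATURE_RDRAND with SECONDARY_EXEC_RDRAND_EXITING (opt-out,
 * exiting == true).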
 */
#define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)

#define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)

static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
{
	struct kvm_vcpu *vcpu = &vmx->vcpu;

	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;

	if (vmx_pt_mode_is_system())
		exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
	if (!cpu_need_virtualize_apic_accesses(vcpu))
		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	if (vmx->vpid == 0)
		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
	if (!enable_ept) {
		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
		enable_unrestricted_guest = 0;
	}
	if (!enable_unrestricted_guest)
		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
	if (kvm_pause_in_guest(vmx->vcpu.kvm))
		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
	if (!kvm_vcpu_apicv_active(vcpu))
		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;

	/* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP in vmx_set_cr4(). */
	exec_control &= ~SECONDARY_EXEC_DESC;

	/*
	 * SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
	 * (handle_vmptrld). Shadow VMCS can't be enabled here because there
	 * is no current VMCS12 yet.
	 */
	exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;

	/*
	 * PML is enabled/disabled when dirty logging of memslots changes, but
	 * it needs to be set here when dirty logging is already active, e.g.
	 * if this vCPU was created after dirty logging was enabled.
	 */
	if (!vcpu->kvm->arch.cpu_dirty_logging_count)
		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;

	if (cpu_has_vmx_xsaves()) {
		/* Exposing XSAVES only when XSAVE is exposed */
		bool xsaves_enabled =
			boot_cpu_has(X86_FEATURE_XSAVE) &&
			guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
			guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);

		vcpu->arch.xsaves_enabled = xsaves_enabled;

		vmx_adjust_secondary_exec_control(vmx, &exec_control,
						  SECONDARY_EXEC_XSAVES,
						  xsaves_enabled, false);
	}

	/*
	 * RDPID is also gated by ENABLE_RDTSCP; turn on the control if either
	 * feature is exposed to the guest. This creates a virtualization hole
	 * if both are supported in hardware but only one is exposed to the
	 * guest, but letting the guest execute RDTSCP or RDPID when either one
	 * is advertised is preferable to emulating the advertised instruction
	 * in KVM on #UD, and obviously better than incorrectly injecting #UD.
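	 * (Architecturally, RDPID also takes a #UD in VMX non-root operation
	 * when the "enable RDTSCP" execution control is clear, which is why
	 * the single SECONDARY_EXEC_ENABLE_RDTSCP bit has to cover both
	 * instructions.)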
4252 */ 4253 if (cpu_has_vmx_rdtscp()) { 4254 bool rdpid_or_rdtscp_enabled = 4255 guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) || 4256 guest_cpuid_has(vcpu, X86_FEATURE_RDPID); 4257 4258 vmx_adjust_secondary_exec_control(vmx, &exec_control, 4259 SECONDARY_EXEC_ENABLE_RDTSCP, 4260 rdpid_or_rdtscp_enabled, false); 4261 } 4262 vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID); 4263 4264 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND); 4265 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED); 4266 4267 vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG, 4268 ENABLE_USR_WAIT_PAUSE, false); 4269 4270 if (!vcpu->kvm->arch.bus_lock_detection_enabled) 4271 exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION; 4272 4273 return exec_control; 4274 } 4275 4276 #define VMX_XSS_EXIT_BITMAP 0 4277 4278 static void init_vmcs(struct vcpu_vmx *vmx) 4279 { 4280 if (nested) 4281 nested_vmx_set_vmcs_shadowing_bitmap(); 4282 4283 if (cpu_has_vmx_msr_bitmap()) 4284 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); 4285 4286 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); /* 22.3.1.5 */ 4287 4288 /* Control */ 4289 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); 4290 4291 exec_controls_set(vmx, vmx_exec_control(vmx)); 4292 4293 if (cpu_has_secondary_exec_ctrls()) 4294 secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx)); 4295 4296 if (kvm_vcpu_apicv_active(&vmx->vcpu)) { 4297 vmcs_write64(EOI_EXIT_BITMAP0, 0); 4298 vmcs_write64(EOI_EXIT_BITMAP1, 0); 4299 vmcs_write64(EOI_EXIT_BITMAP2, 0); 4300 vmcs_write64(EOI_EXIT_BITMAP3, 0); 4301 4302 vmcs_write16(GUEST_INTR_STATUS, 0); 4303 4304 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); 4305 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); 4306 } 4307 4308 if (!kvm_pause_in_guest(vmx->vcpu.kvm)) { 4309 vmcs_write32(PLE_GAP, ple_gap); 4310 vmx->ple_window = ple_window; 4311 vmx->ple_window_dirty = true; 4312 } 4313 4314 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 4315 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 4316 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ 4317 4318 vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ 4319 vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ 4320 vmx_set_constant_host_state(vmx); 4321 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ 4322 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ 4323 4324 if (cpu_has_vmx_vmfunc()) 4325 vmcs_write64(VM_FUNCTION_CONTROL, 0); 4326 4327 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); 4328 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 4329 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 4330 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 4331 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 4332 4333 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) 4334 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 4335 4336 vm_exit_controls_set(vmx, vmx_vmexit_ctrl()); 4337 4338 /* 22.2.1, 20.8.1 */ 4339 vm_entry_controls_set(vmx, vmx_vmentry_ctrl()); 4340 4341 vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4342 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits); 4343 4344 set_cr4_guest_host_mask(vmx); 4345 4346 if (vmx->vpid != 0) 4347 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 4348 4349 if (cpu_has_vmx_xsaves()) 4350 vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); 4351 4352 if (enable_pml) { 4353 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); 4354 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 4355 } 4356 4357 vmx_write_encls_bitmap(&vmx->vcpu, 
NULL); 4358 4359 if (vmx_pt_mode_is_host_guest()) { 4360 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); 4361 /* Bit[6~0] are forced to 1, writes are ignored. */ 4362 vmx->pt_desc.guest.output_mask = 0x7F; 4363 vmcs_write64(GUEST_IA32_RTIT_CTL, 0); 4364 } 4365 4366 vmcs_write32(GUEST_SYSENTER_CS, 0); 4367 vmcs_writel(GUEST_SYSENTER_ESP, 0); 4368 vmcs_writel(GUEST_SYSENTER_EIP, 0); 4369 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4370 4371 if (cpu_has_vmx_tpr_shadow()) { 4372 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); 4373 if (cpu_need_tpr_shadow(&vmx->vcpu)) 4374 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 4375 __pa(vmx->vcpu.arch.apic->regs)); 4376 vmcs_write32(TPR_THRESHOLD, 0); 4377 } 4378 4379 vmx_setup_uret_msrs(vmx); 4380 } 4381 4382 static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu) 4383 { 4384 struct vcpu_vmx *vmx = to_vmx(vcpu); 4385 4386 init_vmcs(vmx); 4387 4388 if (nested) 4389 memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs)); 4390 4391 vcpu_setup_sgx_lepubkeyhash(vcpu); 4392 4393 vmx->nested.posted_intr_nv = -1; 4394 vmx->nested.vmxon_ptr = INVALID_GPA; 4395 vmx->nested.current_vmptr = INVALID_GPA; 4396 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; 4397 4398 vcpu->arch.microcode_version = 0x100000000ULL; 4399 vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED; 4400 4401 /* 4402 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR 4403 * or POSTED_INTR_WAKEUP_VECTOR. 4404 */ 4405 vmx->pi_desc.nv = POSTED_INTR_VECTOR; 4406 vmx->pi_desc.sn = 1; 4407 } 4408 4409 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 4410 { 4411 struct vcpu_vmx *vmx = to_vmx(vcpu); 4412 4413 if (!init_event) 4414 __vmx_vcpu_reset(vcpu); 4415 4416 vmx->rmode.vm86_active = 0; 4417 vmx->spec_ctrl = 0; 4418 4419 vmx->msr_ia32_umwait_control = 0; 4420 4421 vmx->hv_deadline_tsc = -1; 4422 kvm_set_cr8(vcpu, 0); 4423 4424 vmx_segment_cache_clear(vmx); 4425 kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS); 4426 4427 seg_setup(VCPU_SREG_CS); 4428 vmcs_write16(GUEST_CS_SELECTOR, 0xf000); 4429 vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); 4430 4431 seg_setup(VCPU_SREG_DS); 4432 seg_setup(VCPU_SREG_ES); 4433 seg_setup(VCPU_SREG_FS); 4434 seg_setup(VCPU_SREG_GS); 4435 seg_setup(VCPU_SREG_SS); 4436 4437 vmcs_write16(GUEST_TR_SELECTOR, 0); 4438 vmcs_writel(GUEST_TR_BASE, 0); 4439 vmcs_write32(GUEST_TR_LIMIT, 0xffff); 4440 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 4441 4442 vmcs_write16(GUEST_LDTR_SELECTOR, 0); 4443 vmcs_writel(GUEST_LDTR_BASE, 0); 4444 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); 4445 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); 4446 4447 vmcs_writel(GUEST_GDTR_BASE, 0); 4448 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); 4449 4450 vmcs_writel(GUEST_IDTR_BASE, 0); 4451 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); 4452 4453 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); 4454 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); 4455 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); 4456 if (kvm_mpx_supported()) 4457 vmcs_write64(GUEST_BNDCFGS, 0); 4458 4459 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ 4460 4461 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 4462 4463 vpid_sync_context(vmx->vpid); 4464 } 4465 4466 static void vmx_enable_irq_window(struct kvm_vcpu *vcpu) 4467 { 4468 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); 4469 } 4470 4471 static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu) 4472 { 4473 if (!enable_vnmi || 4474 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { 4475 vmx_enable_irq_window(vcpu); 
4476 return; 4477 } 4478 4479 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); 4480 } 4481 4482 static void vmx_inject_irq(struct kvm_vcpu *vcpu) 4483 { 4484 struct vcpu_vmx *vmx = to_vmx(vcpu); 4485 uint32_t intr; 4486 int irq = vcpu->arch.interrupt.nr; 4487 4488 trace_kvm_inj_virq(irq); 4489 4490 ++vcpu->stat.irq_injections; 4491 if (vmx->rmode.vm86_active) { 4492 int inc_eip = 0; 4493 if (vcpu->arch.interrupt.soft) 4494 inc_eip = vcpu->arch.event_exit_inst_len; 4495 kvm_inject_realmode_interrupt(vcpu, irq, inc_eip); 4496 return; 4497 } 4498 intr = irq | INTR_INFO_VALID_MASK; 4499 if (vcpu->arch.interrupt.soft) { 4500 intr |= INTR_TYPE_SOFT_INTR; 4501 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 4502 vmx->vcpu.arch.event_exit_inst_len); 4503 } else 4504 intr |= INTR_TYPE_EXT_INTR; 4505 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); 4506 4507 vmx_clear_hlt(vcpu); 4508 } 4509 4510 static void vmx_inject_nmi(struct kvm_vcpu *vcpu) 4511 { 4512 struct vcpu_vmx *vmx = to_vmx(vcpu); 4513 4514 if (!enable_vnmi) { 4515 /* 4516 * Tracking the NMI-blocked state in software is built upon 4517 * finding the next open IRQ window. This, in turn, depends on 4518 * well-behaving guests: They have to keep IRQs disabled at 4519 * least as long as the NMI handler runs. Otherwise we may 4520 * cause NMI nesting, maybe breaking the guest. But as this is 4521 * highly unlikely, we can live with the residual risk. 4522 */ 4523 vmx->loaded_vmcs->soft_vnmi_blocked = 1; 4524 vmx->loaded_vmcs->vnmi_blocked_time = 0; 4525 } 4526 4527 ++vcpu->stat.nmi_injections; 4528 vmx->loaded_vmcs->nmi_known_unmasked = false; 4529 4530 if (vmx->rmode.vm86_active) { 4531 kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0); 4532 return; 4533 } 4534 4535 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 4536 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); 4537 4538 vmx_clear_hlt(vcpu); 4539 } 4540 4541 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) 4542 { 4543 struct vcpu_vmx *vmx = to_vmx(vcpu); 4544 bool masked; 4545 4546 if (!enable_vnmi) 4547 return vmx->loaded_vmcs->soft_vnmi_blocked; 4548 if (vmx->loaded_vmcs->nmi_known_unmasked) 4549 return false; 4550 masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; 4551 vmx->loaded_vmcs->nmi_known_unmasked = !masked; 4552 return masked; 4553 } 4554 4555 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) 4556 { 4557 struct vcpu_vmx *vmx = to_vmx(vcpu); 4558 4559 if (!enable_vnmi) { 4560 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { 4561 vmx->loaded_vmcs->soft_vnmi_blocked = masked; 4562 vmx->loaded_vmcs->vnmi_blocked_time = 0; 4563 } 4564 } else { 4565 vmx->loaded_vmcs->nmi_known_unmasked = !masked; 4566 if (masked) 4567 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 4568 GUEST_INTR_STATE_NMI); 4569 else 4570 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, 4571 GUEST_INTR_STATE_NMI); 4572 } 4573 } 4574 4575 bool vmx_nmi_blocked(struct kvm_vcpu *vcpu) 4576 { 4577 if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu)) 4578 return false; 4579 4580 if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) 4581 return true; 4582 4583 return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 4584 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI | 4585 GUEST_INTR_STATE_NMI)); 4586 } 4587 4588 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) 4589 { 4590 if (to_vmx(vcpu)->nested.nested_run_pending) 4591 return -EBUSY; 4592 4593 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. 
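	 * Returning -EBUSY tells the caller to keep the NMI pending and to
	 * retry injection after the nested VM-Exit to L1 has been processed.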
*/ 4594 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu)) 4595 return -EBUSY; 4596 4597 return !vmx_nmi_blocked(vcpu); 4598 } 4599 4600 bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu) 4601 { 4602 if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) 4603 return false; 4604 4605 return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) || 4606 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 4607 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); 4608 } 4609 4610 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) 4611 { 4612 if (to_vmx(vcpu)->nested.nested_run_pending) 4613 return -EBUSY; 4614 4615 /* 4616 * An IRQ must not be injected into L2 if it's supposed to VM-Exit, 4617 * e.g. if the IRQ arrived asynchronously after checking nested events. 4618 */ 4619 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) 4620 return -EBUSY; 4621 4622 return !vmx_interrupt_blocked(vcpu); 4623 } 4624 4625 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) 4626 { 4627 void __user *ret; 4628 4629 if (enable_unrestricted_guest) 4630 return 0; 4631 4632 mutex_lock(&kvm->slots_lock); 4633 ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, 4634 PAGE_SIZE * 3); 4635 mutex_unlock(&kvm->slots_lock); 4636 4637 if (IS_ERR(ret)) 4638 return PTR_ERR(ret); 4639 4640 to_kvm_vmx(kvm)->tss_addr = addr; 4641 4642 return init_rmode_tss(kvm, ret); 4643 } 4644 4645 static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) 4646 { 4647 to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr; 4648 return 0; 4649 } 4650 4651 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) 4652 { 4653 switch (vec) { 4654 case BP_VECTOR: 4655 /* 4656 * Update instruction length as we may reinject the exception 4657 * from user space while in guest debugging mode. 4658 */ 4659 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = 4660 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4661 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 4662 return false; 4663 fallthrough; 4664 case DB_VECTOR: 4665 return !(vcpu->guest_debug & 4666 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)); 4667 case DE_VECTOR: 4668 case OF_VECTOR: 4669 case BR_VECTOR: 4670 case UD_VECTOR: 4671 case DF_VECTOR: 4672 case SS_VECTOR: 4673 case GP_VECTOR: 4674 case MF_VECTOR: 4675 return true; 4676 } 4677 return false; 4678 } 4679 4680 static int handle_rmode_exception(struct kvm_vcpu *vcpu, 4681 int vec, u32 err_code) 4682 { 4683 /* 4684 * Instruction with address size override prefix opcode 0x67 4685 * Cause the #SS fault with 0 error code in VM86 mode. 4686 */ 4687 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { 4688 if (kvm_emulate_instruction(vcpu, 0)) { 4689 if (vcpu->arch.halt_request) { 4690 vcpu->arch.halt_request = 0; 4691 return kvm_vcpu_halt(vcpu); 4692 } 4693 return 1; 4694 } 4695 return 0; 4696 } 4697 4698 /* 4699 * Forward all other exceptions that are valid in real mode. 4700 * FIXME: Breaks guest debugging in real mode, needs to be fixed with 4701 * the required debugging infrastructure rework. 4702 */ 4703 kvm_queue_exception(vcpu, vec); 4704 return 1; 4705 } 4706 4707 static int handle_machine_check(struct kvm_vcpu *vcpu) 4708 { 4709 /* handled by vmx_vcpu_run() */ 4710 return 1; 4711 } 4712 4713 /* 4714 * If the host has split lock detection disabled, then #AC is 4715 * unconditionally injected into the guest, which is the pre split lock 4716 * detection behaviour. 
4717 * 4718 * If the host has split lock detection enabled then #AC is 4719 * only injected into the guest when: 4720 * - Guest CPL == 3 (user mode) 4721 * - Guest has #AC detection enabled in CR0 4722 * - Guest EFLAGS has AC bit set 4723 */ 4724 bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu) 4725 { 4726 if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) 4727 return true; 4728 4729 return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) && 4730 (kvm_get_rflags(vcpu) & X86_EFLAGS_AC); 4731 } 4732 4733 static int handle_exception_nmi(struct kvm_vcpu *vcpu) 4734 { 4735 struct vcpu_vmx *vmx = to_vmx(vcpu); 4736 struct kvm_run *kvm_run = vcpu->run; 4737 u32 intr_info, ex_no, error_code; 4738 unsigned long cr2, dr6; 4739 u32 vect_info; 4740 4741 vect_info = vmx->idt_vectoring_info; 4742 intr_info = vmx_get_intr_info(vcpu); 4743 4744 if (is_machine_check(intr_info) || is_nmi(intr_info)) 4745 return 1; /* handled by handle_exception_nmi_irqoff() */ 4746 4747 if (is_invalid_opcode(intr_info)) 4748 return handle_ud(vcpu); 4749 4750 error_code = 0; 4751 if (intr_info & INTR_INFO_DELIVER_CODE_MASK) 4752 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 4753 4754 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { 4755 WARN_ON_ONCE(!enable_vmware_backdoor); 4756 4757 /* 4758 * VMware backdoor emulation on #GP interception only handles 4759 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero 4760 * error code on #GP. 4761 */ 4762 if (error_code) { 4763 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); 4764 return 1; 4765 } 4766 return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP); 4767 } 4768 4769 /* 4770 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing 4771 * MMIO, it is better to report an internal error. 4772 * See the comments in vmx_handle_exit. 4773 */ 4774 if ((vect_info & VECTORING_INFO_VALID_MASK) && 4775 !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { 4776 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 4777 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; 4778 vcpu->run->internal.ndata = 4; 4779 vcpu->run->internal.data[0] = vect_info; 4780 vcpu->run->internal.data[1] = intr_info; 4781 vcpu->run->internal.data[2] = error_code; 4782 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu; 4783 return 0; 4784 } 4785 4786 if (is_page_fault(intr_info)) { 4787 cr2 = vmx_get_exit_qual(vcpu); 4788 if (enable_ept && !vcpu->arch.apf.host_apf_flags) { 4789 /* 4790 * EPT will cause page fault only if we need to 4791 * detect illegal GPAs. 4792 */ 4793 WARN_ON_ONCE(!allow_smaller_maxphyaddr); 4794 kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code); 4795 return 1; 4796 } else 4797 return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0); 4798 } 4799 4800 ex_no = intr_info & INTR_INFO_VECTOR_MASK; 4801 4802 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) 4803 return handle_rmode_exception(vcpu, ex_no, error_code); 4804 4805 switch (ex_no) { 4806 case DB_VECTOR: 4807 dr6 = vmx_get_exit_qual(vcpu); 4808 if (!(vcpu->guest_debug & 4809 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { 4810 if (is_icebp(intr_info)) 4811 WARN_ON(!skip_emulated_instruction(vcpu)); 4812 4813 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); 4814 return 1; 4815 } 4816 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; 4817 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); 4818 fallthrough; 4819 case BP_VECTOR: 4820 /* 4821 * Update instruction length as we may reinject #BP from 4822 * user space while in guest debugging mode. 
Reading it for 4823 * #DB as well causes no harm, it is not used in that case. 4824 */ 4825 vmx->vcpu.arch.event_exit_inst_len = 4826 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4827 kvm_run->exit_reason = KVM_EXIT_DEBUG; 4828 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); 4829 kvm_run->debug.arch.exception = ex_no; 4830 break; 4831 case AC_VECTOR: 4832 if (vmx_guest_inject_ac(vcpu)) { 4833 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); 4834 return 1; 4835 } 4836 4837 /* 4838 * Handle split lock. Depending on detection mode this will 4839 * either warn and disable split lock detection for this 4840 * task or force SIGBUS on it. 4841 */ 4842 if (handle_guest_split_lock(kvm_rip_read(vcpu))) 4843 return 1; 4844 fallthrough; 4845 default: 4846 kvm_run->exit_reason = KVM_EXIT_EXCEPTION; 4847 kvm_run->ex.exception = ex_no; 4848 kvm_run->ex.error_code = error_code; 4849 break; 4850 } 4851 return 0; 4852 } 4853 4854 static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu) 4855 { 4856 ++vcpu->stat.irq_exits; 4857 return 1; 4858 } 4859 4860 static int handle_triple_fault(struct kvm_vcpu *vcpu) 4861 { 4862 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 4863 vcpu->mmio_needed = 0; 4864 return 0; 4865 } 4866 4867 static int handle_io(struct kvm_vcpu *vcpu) 4868 { 4869 unsigned long exit_qualification; 4870 int size, in, string; 4871 unsigned port; 4872 4873 exit_qualification = vmx_get_exit_qual(vcpu); 4874 string = (exit_qualification & 16) != 0; 4875 4876 ++vcpu->stat.io_exits; 4877 4878 if (string) 4879 return kvm_emulate_instruction(vcpu, 0); 4880 4881 port = exit_qualification >> 16; 4882 size = (exit_qualification & 7) + 1; 4883 in = (exit_qualification & 8) != 0; 4884 4885 return kvm_fast_pio(vcpu, size, port, in); 4886 } 4887 4888 static void 4889 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) 4890 { 4891 /* 4892 * Patch in the VMCALL instruction: 4893 */ 4894 hypercall[0] = 0x0f; 4895 hypercall[1] = 0x01; 4896 hypercall[2] = 0xc1; 4897 } 4898 4899 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */ 4900 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) 4901 { 4902 if (is_guest_mode(vcpu)) { 4903 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4904 unsigned long orig_val = val; 4905 4906 /* 4907 * We get here when L2 changed cr0 in a way that did not change 4908 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), 4909 * but did change L0 shadowed bits. So we first calculate the 4910 * effective cr0 value that L1 would like to write into the 4911 * hardware. It consists of the L2-owned bits from the new 4912 * value combined with the L1-owned bits from L1's guest_cr0. 
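		 * For example, if L1 shadows only CR0.TS
		 * (cr0_guest_host_mask == X86_CR0_TS), every bit of the value
		 * L2 just wrote is kept except CR0.TS, which is taken from
		 * vmcs12->guest_cr0 instead.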
4913 */ 4914 val = (val & ~vmcs12->cr0_guest_host_mask) | 4915 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); 4916 4917 if (!nested_guest_cr0_valid(vcpu, val)) 4918 return 1; 4919 4920 if (kvm_set_cr0(vcpu, val)) 4921 return 1; 4922 vmcs_writel(CR0_READ_SHADOW, orig_val); 4923 return 0; 4924 } else { 4925 if (to_vmx(vcpu)->nested.vmxon && 4926 !nested_host_cr0_valid(vcpu, val)) 4927 return 1; 4928 4929 return kvm_set_cr0(vcpu, val); 4930 } 4931 } 4932 4933 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) 4934 { 4935 if (is_guest_mode(vcpu)) { 4936 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4937 unsigned long orig_val = val; 4938 4939 /* analogously to handle_set_cr0 */ 4940 val = (val & ~vmcs12->cr4_guest_host_mask) | 4941 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); 4942 if (kvm_set_cr4(vcpu, val)) 4943 return 1; 4944 vmcs_writel(CR4_READ_SHADOW, orig_val); 4945 return 0; 4946 } else 4947 return kvm_set_cr4(vcpu, val); 4948 } 4949 4950 static int handle_desc(struct kvm_vcpu *vcpu) 4951 { 4952 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); 4953 return kvm_emulate_instruction(vcpu, 0); 4954 } 4955 4956 static int handle_cr(struct kvm_vcpu *vcpu) 4957 { 4958 unsigned long exit_qualification, val; 4959 int cr; 4960 int reg; 4961 int err; 4962 int ret; 4963 4964 exit_qualification = vmx_get_exit_qual(vcpu); 4965 cr = exit_qualification & 15; 4966 reg = (exit_qualification >> 8) & 15; 4967 switch ((exit_qualification >> 4) & 3) { 4968 case 0: /* mov to cr */ 4969 val = kvm_register_read(vcpu, reg); 4970 trace_kvm_cr_write(cr, val); 4971 switch (cr) { 4972 case 0: 4973 err = handle_set_cr0(vcpu, val); 4974 return kvm_complete_insn_gp(vcpu, err); 4975 case 3: 4976 WARN_ON_ONCE(enable_unrestricted_guest); 4977 4978 err = kvm_set_cr3(vcpu, val); 4979 return kvm_complete_insn_gp(vcpu, err); 4980 case 4: 4981 err = handle_set_cr4(vcpu, val); 4982 return kvm_complete_insn_gp(vcpu, err); 4983 case 8: { 4984 u8 cr8_prev = kvm_get_cr8(vcpu); 4985 u8 cr8 = (u8)val; 4986 err = kvm_set_cr8(vcpu, cr8); 4987 ret = kvm_complete_insn_gp(vcpu, err); 4988 if (lapic_in_kernel(vcpu)) 4989 return ret; 4990 if (cr8_prev <= cr8) 4991 return ret; 4992 /* 4993 * TODO: we might be squashing a 4994 * KVM_GUESTDBG_SINGLESTEP-triggered 4995 * KVM_EXIT_DEBUG here. 
4996 */ 4997 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; 4998 return 0; 4999 } 5000 } 5001 break; 5002 case 2: /* clts */ 5003 KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS"); 5004 return -EIO; 5005 case 1: /*mov from cr*/ 5006 switch (cr) { 5007 case 3: 5008 WARN_ON_ONCE(enable_unrestricted_guest); 5009 5010 val = kvm_read_cr3(vcpu); 5011 kvm_register_write(vcpu, reg, val); 5012 trace_kvm_cr_read(cr, val); 5013 return kvm_skip_emulated_instruction(vcpu); 5014 case 8: 5015 val = kvm_get_cr8(vcpu); 5016 kvm_register_write(vcpu, reg, val); 5017 trace_kvm_cr_read(cr, val); 5018 return kvm_skip_emulated_instruction(vcpu); 5019 } 5020 break; 5021 case 3: /* lmsw */ 5022 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 5023 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); 5024 kvm_lmsw(vcpu, val); 5025 5026 return kvm_skip_emulated_instruction(vcpu); 5027 default: 5028 break; 5029 } 5030 vcpu->run->exit_reason = 0; 5031 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", 5032 (int)(exit_qualification >> 4) & 3, cr); 5033 return 0; 5034 } 5035 5036 static int handle_dr(struct kvm_vcpu *vcpu) 5037 { 5038 unsigned long exit_qualification; 5039 int dr, dr7, reg; 5040 int err = 1; 5041 5042 exit_qualification = vmx_get_exit_qual(vcpu); 5043 dr = exit_qualification & DEBUG_REG_ACCESS_NUM; 5044 5045 /* First, if DR does not exist, trigger UD */ 5046 if (!kvm_require_dr(vcpu, dr)) 5047 return 1; 5048 5049 if (kvm_x86_ops.get_cpl(vcpu) > 0) 5050 goto out; 5051 5052 dr7 = vmcs_readl(GUEST_DR7); 5053 if (dr7 & DR7_GD) { 5054 /* 5055 * As the vm-exit takes precedence over the debug trap, we 5056 * need to emulate the latter, either for the host or the 5057 * guest debugging itself. 5058 */ 5059 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 5060 vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW; 5061 vcpu->run->debug.arch.dr7 = dr7; 5062 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); 5063 vcpu->run->debug.arch.exception = DB_VECTOR; 5064 vcpu->run->exit_reason = KVM_EXIT_DEBUG; 5065 return 0; 5066 } else { 5067 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD); 5068 return 1; 5069 } 5070 } 5071 5072 if (vcpu->guest_debug == 0) { 5073 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); 5074 5075 /* 5076 * No more DR vmexits; force a reload of the debug registers 5077 * and reenter on this instruction. The next vmexit will 5078 * retrieve the full state of the debug registers. 5079 */ 5080 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; 5081 return 1; 5082 } 5083 5084 reg = DEBUG_REG_ACCESS_REG(exit_qualification); 5085 if (exit_qualification & TYPE_MOV_FROM_DR) { 5086 unsigned long val; 5087 5088 kvm_get_dr(vcpu, dr, &val); 5089 kvm_register_write(vcpu, reg, val); 5090 err = 0; 5091 } else { 5092 err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg)); 5093 } 5094 5095 out: 5096 return kvm_complete_insn_gp(vcpu, err); 5097 } 5098 5099 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) 5100 { 5101 get_debugreg(vcpu->arch.db[0], 0); 5102 get_debugreg(vcpu->arch.db[1], 1); 5103 get_debugreg(vcpu->arch.db[2], 2); 5104 get_debugreg(vcpu->arch.db[3], 3); 5105 get_debugreg(vcpu->arch.dr6, 6); 5106 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); 5107 5108 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; 5109 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); 5110 5111 /* 5112 * exc_debug expects dr6 to be cleared after it runs, avoid that it sees 5113 * a stale dr6 from the guest. 
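	 * DR6_RESERVED is DR6's architectural reset value (no debug
	 * conditions reported), so the host's #DB handler sees a clean DR6
	 * rather than stale guest state.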
5114 */ 5115 set_debugreg(DR6_RESERVED, 6); 5116 } 5117 5118 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) 5119 { 5120 vmcs_writel(GUEST_DR7, val); 5121 } 5122 5123 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) 5124 { 5125 kvm_apic_update_ppr(vcpu); 5126 return 1; 5127 } 5128 5129 static int handle_interrupt_window(struct kvm_vcpu *vcpu) 5130 { 5131 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); 5132 5133 kvm_make_request(KVM_REQ_EVENT, vcpu); 5134 5135 ++vcpu->stat.irq_window_exits; 5136 return 1; 5137 } 5138 5139 static int handle_invlpg(struct kvm_vcpu *vcpu) 5140 { 5141 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5142 5143 kvm_mmu_invlpg(vcpu, exit_qualification); 5144 return kvm_skip_emulated_instruction(vcpu); 5145 } 5146 5147 static int handle_apic_access(struct kvm_vcpu *vcpu) 5148 { 5149 if (likely(fasteoi)) { 5150 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5151 int access_type, offset; 5152 5153 access_type = exit_qualification & APIC_ACCESS_TYPE; 5154 offset = exit_qualification & APIC_ACCESS_OFFSET; 5155 /* 5156 * Sane guest uses MOV to write EOI, with written value 5157 * not cared. So make a short-circuit here by avoiding 5158 * heavy instruction emulation. 5159 */ 5160 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && 5161 (offset == APIC_EOI)) { 5162 kvm_lapic_set_eoi(vcpu); 5163 return kvm_skip_emulated_instruction(vcpu); 5164 } 5165 } 5166 return kvm_emulate_instruction(vcpu, 0); 5167 } 5168 5169 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) 5170 { 5171 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5172 int vector = exit_qualification & 0xff; 5173 5174 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ 5175 kvm_apic_set_eoi_accelerated(vcpu, vector); 5176 return 1; 5177 } 5178 5179 static int handle_apic_write(struct kvm_vcpu *vcpu) 5180 { 5181 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5182 u32 offset = exit_qualification & 0xfff; 5183 5184 /* APIC-write VM exit is trap-like and thus no need to adjust IP */ 5185 kvm_apic_write_nodecode(vcpu, offset); 5186 return 1; 5187 } 5188 5189 static int handle_task_switch(struct kvm_vcpu *vcpu) 5190 { 5191 struct vcpu_vmx *vmx = to_vmx(vcpu); 5192 unsigned long exit_qualification; 5193 bool has_error_code = false; 5194 u32 error_code = 0; 5195 u16 tss_selector; 5196 int reason, type, idt_v, idt_index; 5197 5198 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); 5199 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); 5200 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); 5201 5202 exit_qualification = vmx_get_exit_qual(vcpu); 5203 5204 reason = (u32)exit_qualification >> 30; 5205 if (reason == TASK_SWITCH_GATE && idt_v) { 5206 switch (type) { 5207 case INTR_TYPE_NMI_INTR: 5208 vcpu->arch.nmi_injected = false; 5209 vmx_set_nmi_mask(vcpu, true); 5210 break; 5211 case INTR_TYPE_EXT_INTR: 5212 case INTR_TYPE_SOFT_INTR: 5213 kvm_clear_interrupt_queue(vcpu); 5214 break; 5215 case INTR_TYPE_HARD_EXCEPTION: 5216 if (vmx->idt_vectoring_info & 5217 VECTORING_INFO_DELIVER_CODE_MASK) { 5218 has_error_code = true; 5219 error_code = 5220 vmcs_read32(IDT_VECTORING_ERROR_CODE); 5221 } 5222 fallthrough; 5223 case INTR_TYPE_SOFT_EXCEPTION: 5224 kvm_clear_exception_queue(vcpu); 5225 break; 5226 default: 5227 break; 5228 } 5229 } 5230 tss_selector = exit_qualification; 5231 5232 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && 5233 type != INTR_TYPE_EXT_INTR && 
5234 type != INTR_TYPE_NMI_INTR)) 5235 WARN_ON(!skip_emulated_instruction(vcpu)); 5236 5237 /* 5238 * TODO: What about debug traps on tss switch? 5239 * Are we supposed to inject them and update dr6? 5240 */ 5241 return kvm_task_switch(vcpu, tss_selector, 5242 type == INTR_TYPE_SOFT_INTR ? idt_index : -1, 5243 reason, has_error_code, error_code); 5244 } 5245 5246 static int handle_ept_violation(struct kvm_vcpu *vcpu) 5247 { 5248 unsigned long exit_qualification; 5249 gpa_t gpa; 5250 u64 error_code; 5251 5252 exit_qualification = vmx_get_exit_qual(vcpu); 5253 5254 /* 5255 * EPT violation happened while executing iret from NMI, 5256 * "blocked by NMI" bit has to be set before next VM entry. 5257 * There are errata that may cause this bit to not be set: 5258 * AAK134, BY25. 5259 */ 5260 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 5261 enable_vnmi && 5262 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 5263 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); 5264 5265 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5266 trace_kvm_page_fault(gpa, exit_qualification); 5267 5268 /* Is it a read fault? */ 5269 error_code = (exit_qualification & EPT_VIOLATION_ACC_READ) 5270 ? PFERR_USER_MASK : 0; 5271 /* Is it a write fault? */ 5272 error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE) 5273 ? PFERR_WRITE_MASK : 0; 5274 /* Is it a fetch fault? */ 5275 error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR) 5276 ? PFERR_FETCH_MASK : 0; 5277 /* ept page table entry is present? */ 5278 error_code |= (exit_qualification & 5279 (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE | 5280 EPT_VIOLATION_EXECUTABLE)) 5281 ? PFERR_PRESENT_MASK : 0; 5282 5283 error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ? 5284 PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; 5285 5286 vcpu->arch.exit_qualification = exit_qualification; 5287 5288 /* 5289 * Check that the GPA doesn't exceed physical memory limits, as that is 5290 * a guest page fault. We have to emulate the instruction here, because 5291 * if the illegal address is that of a paging structure, then 5292 * EPT_VIOLATION_ACC_WRITE bit is set. Alternatively, if supported we 5293 * would also use advanced VM-exit information for EPT violations to 5294 * reconstruct the page fault error code. 5295 */ 5296 if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa))) 5297 return kvm_emulate_instruction(vcpu, 0); 5298 5299 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); 5300 } 5301 5302 static int handle_ept_misconfig(struct kvm_vcpu *vcpu) 5303 { 5304 gpa_t gpa; 5305 5306 if (!vmx_can_emulate_instruction(vcpu, NULL, 0)) 5307 return 1; 5308 5309 /* 5310 * A nested guest cannot optimize MMIO vmexits, because we have an 5311 * nGPA here instead of the required GPA. 
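	 * (GUEST_PHYSICAL_ADDRESS holds an L2 GPA that would first have to be
	 * translated through L1's EPT tables before it could be matched
	 * against the fast MMIO bus, so take the slower kvm_mmu_page_fault()
	 * path instead.)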
5312 */ 5313 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5314 if (!is_guest_mode(vcpu) && 5315 !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { 5316 trace_kvm_fast_mmio(gpa); 5317 return kvm_skip_emulated_instruction(vcpu); 5318 } 5319 5320 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); 5321 } 5322 5323 static int handle_nmi_window(struct kvm_vcpu *vcpu) 5324 { 5325 if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm)) 5326 return -EIO; 5327 5328 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); 5329 ++vcpu->stat.nmi_window_exits; 5330 kvm_make_request(KVM_REQ_EVENT, vcpu); 5331 5332 return 1; 5333 } 5334 5335 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) 5336 { 5337 struct vcpu_vmx *vmx = to_vmx(vcpu); 5338 bool intr_window_requested; 5339 unsigned count = 130; 5340 5341 intr_window_requested = exec_controls_get(vmx) & 5342 CPU_BASED_INTR_WINDOW_EXITING; 5343 5344 while (vmx->emulation_required && count-- != 0) { 5345 if (intr_window_requested && !vmx_interrupt_blocked(vcpu)) 5346 return handle_interrupt_window(&vmx->vcpu); 5347 5348 if (kvm_test_request(KVM_REQ_EVENT, vcpu)) 5349 return 1; 5350 5351 if (!kvm_emulate_instruction(vcpu, 0)) 5352 return 0; 5353 5354 if (vmx->emulation_required && !vmx->rmode.vm86_active && 5355 vcpu->arch.exception.pending) { 5356 kvm_prepare_emulation_failure_exit(vcpu); 5357 return 0; 5358 } 5359 5360 if (vcpu->arch.halt_request) { 5361 vcpu->arch.halt_request = 0; 5362 return kvm_vcpu_halt(vcpu); 5363 } 5364 5365 /* 5366 * Note, return 1 and not 0, vcpu_run() will invoke 5367 * xfer_to_guest_mode() which will create a proper return 5368 * code. 5369 */ 5370 if (__xfer_to_guest_mode_work_pending()) 5371 return 1; 5372 } 5373 5374 return 1; 5375 } 5376 5377 static void grow_ple_window(struct kvm_vcpu *vcpu) 5378 { 5379 struct vcpu_vmx *vmx = to_vmx(vcpu); 5380 unsigned int old = vmx->ple_window; 5381 5382 vmx->ple_window = __grow_ple_window(old, ple_window, 5383 ple_window_grow, 5384 ple_window_max); 5385 5386 if (vmx->ple_window != old) { 5387 vmx->ple_window_dirty = true; 5388 trace_kvm_ple_window_update(vcpu->vcpu_id, 5389 vmx->ple_window, old); 5390 } 5391 } 5392 5393 static void shrink_ple_window(struct kvm_vcpu *vcpu) 5394 { 5395 struct vcpu_vmx *vmx = to_vmx(vcpu); 5396 unsigned int old = vmx->ple_window; 5397 5398 vmx->ple_window = __shrink_ple_window(old, ple_window, 5399 ple_window_shrink, 5400 ple_window); 5401 5402 if (vmx->ple_window != old) { 5403 vmx->ple_window_dirty = true; 5404 trace_kvm_ple_window_update(vcpu->vcpu_id, 5405 vmx->ple_window, old); 5406 } 5407 } 5408 5409 /* 5410 * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE 5411 * exiting, so only get here on cpu with PAUSE-Loop-Exiting. 5412 */ 5413 static int handle_pause(struct kvm_vcpu *vcpu) 5414 { 5415 if (!kvm_pause_in_guest(vcpu->kvm)) 5416 grow_ple_window(vcpu); 5417 5418 /* 5419 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting" 5420 * VM-execution control is ignored if CPL > 0. OTOH, KVM 5421 * never set PAUSE_EXITING and just set PLE if supported, 5422 * so the vcpu must be CPL=0 if it gets a PAUSE exit. 
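	 * That CPL=0 guarantee is also why kvm_vcpu_on_spin() is called with
	 * its second argument set to true below.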
5423 */ 5424 kvm_vcpu_on_spin(vcpu, true); 5425 return kvm_skip_emulated_instruction(vcpu); 5426 } 5427 5428 static int handle_monitor_trap(struct kvm_vcpu *vcpu) 5429 { 5430 return 1; 5431 } 5432 5433 static int handle_invpcid(struct kvm_vcpu *vcpu) 5434 { 5435 u32 vmx_instruction_info; 5436 unsigned long type; 5437 gva_t gva; 5438 struct { 5439 u64 pcid; 5440 u64 gla; 5441 } operand; 5442 int gpr_index; 5443 5444 if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { 5445 kvm_queue_exception(vcpu, UD_VECTOR); 5446 return 1; 5447 } 5448 5449 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5450 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info); 5451 type = kvm_register_read(vcpu, gpr_index); 5452 5453 /* According to the Intel instruction reference, the memory operand 5454 * is read even if it isn't needed (e.g., for type==all) 5455 */ 5456 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5457 vmx_instruction_info, false, 5458 sizeof(operand), &gva)) 5459 return 1; 5460 5461 return kvm_handle_invpcid(vcpu, type, gva); 5462 } 5463 5464 static int handle_pml_full(struct kvm_vcpu *vcpu) 5465 { 5466 unsigned long exit_qualification; 5467 5468 trace_kvm_pml_full(vcpu->vcpu_id); 5469 5470 exit_qualification = vmx_get_exit_qual(vcpu); 5471 5472 /* 5473 * PML buffer FULL happened while executing iret from NMI, 5474 * "blocked by NMI" bit has to be set before next VM entry. 5475 */ 5476 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 5477 enable_vnmi && 5478 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 5479 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 5480 GUEST_INTR_STATE_NMI); 5481 5482 /* 5483 * PML buffer already flushed at beginning of VMEXIT. Nothing to do 5484 * here.., and there's no userspace involvement needed for PML. 5485 */ 5486 return 1; 5487 } 5488 5489 static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu) 5490 { 5491 struct vcpu_vmx *vmx = to_vmx(vcpu); 5492 5493 if (!vmx->req_immediate_exit && 5494 !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) { 5495 kvm_lapic_expired_hv_timer(vcpu); 5496 return EXIT_FASTPATH_REENTER_GUEST; 5497 } 5498 5499 return EXIT_FASTPATH_NONE; 5500 } 5501 5502 static int handle_preemption_timer(struct kvm_vcpu *vcpu) 5503 { 5504 handle_fastpath_preemption_timer(vcpu); 5505 return 1; 5506 } 5507 5508 /* 5509 * When nested=0, all VMX instruction VM Exits filter here. The handlers 5510 * are overwritten by nested_vmx_setup() when nested=1. 5511 */ 5512 static int handle_vmx_instruction(struct kvm_vcpu *vcpu) 5513 { 5514 kvm_queue_exception(vcpu, UD_VECTOR); 5515 return 1; 5516 } 5517 5518 #ifndef CONFIG_X86_SGX_KVM 5519 static int handle_encls(struct kvm_vcpu *vcpu) 5520 { 5521 /* 5522 * SGX virtualization is disabled. There is no software enable bit for 5523 * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent 5524 * the guest from executing ENCLS (when SGX is supported by hardware). 5525 */ 5526 kvm_queue_exception(vcpu, UD_VECTOR); 5527 return 1; 5528 } 5529 #endif /* CONFIG_X86_SGX_KVM */ 5530 5531 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu) 5532 { 5533 /* 5534 * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK 5535 * VM-Exits. Unconditionally set the flag here and leave the handling to 5536 * vmx_handle_exit(). 5537 */ 5538 to_vmx(vcpu)->exit_reason.bus_lock_detected = true; 5539 return 1; 5540 } 5541 5542 /* 5543 * The exit handlers return 1 if the exit was handled fully and guest execution 5544 * may resume. 
Otherwise they set the kvm_run parameter to indicate what needs 5545 * to be done to userspace and return 0. 5546 */ 5547 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { 5548 [EXIT_REASON_EXCEPTION_NMI] = handle_exception_nmi, 5549 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, 5550 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, 5551 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, 5552 [EXIT_REASON_IO_INSTRUCTION] = handle_io, 5553 [EXIT_REASON_CR_ACCESS] = handle_cr, 5554 [EXIT_REASON_DR_ACCESS] = handle_dr, 5555 [EXIT_REASON_CPUID] = kvm_emulate_cpuid, 5556 [EXIT_REASON_MSR_READ] = kvm_emulate_rdmsr, 5557 [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr, 5558 [EXIT_REASON_INTERRUPT_WINDOW] = handle_interrupt_window, 5559 [EXIT_REASON_HLT] = kvm_emulate_halt, 5560 [EXIT_REASON_INVD] = kvm_emulate_invd, 5561 [EXIT_REASON_INVLPG] = handle_invlpg, 5562 [EXIT_REASON_RDPMC] = kvm_emulate_rdpmc, 5563 [EXIT_REASON_VMCALL] = kvm_emulate_hypercall, 5564 [EXIT_REASON_VMCLEAR] = handle_vmx_instruction, 5565 [EXIT_REASON_VMLAUNCH] = handle_vmx_instruction, 5566 [EXIT_REASON_VMPTRLD] = handle_vmx_instruction, 5567 [EXIT_REASON_VMPTRST] = handle_vmx_instruction, 5568 [EXIT_REASON_VMREAD] = handle_vmx_instruction, 5569 [EXIT_REASON_VMRESUME] = handle_vmx_instruction, 5570 [EXIT_REASON_VMWRITE] = handle_vmx_instruction, 5571 [EXIT_REASON_VMOFF] = handle_vmx_instruction, 5572 [EXIT_REASON_VMON] = handle_vmx_instruction, 5573 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, 5574 [EXIT_REASON_APIC_ACCESS] = handle_apic_access, 5575 [EXIT_REASON_APIC_WRITE] = handle_apic_write, 5576 [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, 5577 [EXIT_REASON_WBINVD] = kvm_emulate_wbinvd, 5578 [EXIT_REASON_XSETBV] = kvm_emulate_xsetbv, 5579 [EXIT_REASON_TASK_SWITCH] = handle_task_switch, 5580 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, 5581 [EXIT_REASON_GDTR_IDTR] = handle_desc, 5582 [EXIT_REASON_LDTR_TR] = handle_desc, 5583 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, 5584 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, 5585 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, 5586 [EXIT_REASON_MWAIT_INSTRUCTION] = kvm_emulate_mwait, 5587 [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, 5588 [EXIT_REASON_MONITOR_INSTRUCTION] = kvm_emulate_monitor, 5589 [EXIT_REASON_INVEPT] = handle_vmx_instruction, 5590 [EXIT_REASON_INVVPID] = handle_vmx_instruction, 5591 [EXIT_REASON_RDRAND] = kvm_handle_invalid_op, 5592 [EXIT_REASON_RDSEED] = kvm_handle_invalid_op, 5593 [EXIT_REASON_PML_FULL] = handle_pml_full, 5594 [EXIT_REASON_INVPCID] = handle_invpcid, 5595 [EXIT_REASON_VMFUNC] = handle_vmx_instruction, 5596 [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, 5597 [EXIT_REASON_ENCLS] = handle_encls, 5598 [EXIT_REASON_BUS_LOCK] = handle_bus_lock_vmexit, 5599 }; 5600 5601 static const int kvm_vmx_max_exit_handlers = 5602 ARRAY_SIZE(kvm_vmx_exit_handlers); 5603 5604 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, 5605 u64 *info1, u64 *info2, 5606 u32 *intr_info, u32 *error_code) 5607 { 5608 struct vcpu_vmx *vmx = to_vmx(vcpu); 5609 5610 *reason = vmx->exit_reason.full; 5611 *info1 = vmx_get_exit_qual(vcpu); 5612 if (!(vmx->exit_reason.failed_vmentry)) { 5613 *info2 = vmx->idt_vectoring_info; 5614 *intr_info = vmx_get_intr_info(vcpu); 5615 if (is_exception_with_error_code(*intr_info)) 5616 *error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 5617 else 5618 *error_code = 0; 5619 } else { 5620 *info2 = 0; 5621 *intr_info = 0; 5622 *error_code 
= 0; 5623 } 5624 } 5625 5626 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) 5627 { 5628 if (vmx->pml_pg) { 5629 __free_page(vmx->pml_pg); 5630 vmx->pml_pg = NULL; 5631 } 5632 } 5633 5634 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) 5635 { 5636 struct vcpu_vmx *vmx = to_vmx(vcpu); 5637 u64 *pml_buf; 5638 u16 pml_idx; 5639 5640 pml_idx = vmcs_read16(GUEST_PML_INDEX); 5641 5642 /* Do nothing if PML buffer is empty */ 5643 if (pml_idx == (PML_ENTITY_NUM - 1)) 5644 return; 5645 5646 /* PML index always points to next available PML buffer entity */ 5647 if (pml_idx >= PML_ENTITY_NUM) 5648 pml_idx = 0; 5649 else 5650 pml_idx++; 5651 5652 pml_buf = page_address(vmx->pml_pg); 5653 for (; pml_idx < PML_ENTITY_NUM; pml_idx++) { 5654 u64 gpa; 5655 5656 gpa = pml_buf[pml_idx]; 5657 WARN_ON(gpa & (PAGE_SIZE - 1)); 5658 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); 5659 } 5660 5661 /* reset PML index */ 5662 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 5663 } 5664 5665 static void vmx_dump_sel(char *name, uint32_t sel) 5666 { 5667 pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", 5668 name, vmcs_read16(sel), 5669 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), 5670 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), 5671 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); 5672 } 5673 5674 static void vmx_dump_dtsel(char *name, uint32_t limit) 5675 { 5676 pr_err("%s limit=0x%08x, base=0x%016lx\n", 5677 name, vmcs_read32(limit), 5678 vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); 5679 } 5680 5681 static void vmx_dump_msrs(char *name, struct vmx_msrs *m) 5682 { 5683 unsigned int i; 5684 struct vmx_msr_entry *e; 5685 5686 pr_err("MSR %s:\n", name); 5687 for (i = 0, e = m->val; i < m->nr; ++i, ++e) 5688 pr_err(" %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value); 5689 } 5690 5691 void dump_vmcs(struct kvm_vcpu *vcpu) 5692 { 5693 struct vcpu_vmx *vmx = to_vmx(vcpu); 5694 u32 vmentry_ctl, vmexit_ctl; 5695 u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control; 5696 unsigned long cr4; 5697 int efer_slot; 5698 5699 if (!dump_invalid_vmcs) { 5700 pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n"); 5701 return; 5702 } 5703 5704 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); 5705 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); 5706 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 5707 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); 5708 cr4 = vmcs_readl(GUEST_CR4); 5709 secondary_exec_control = 0; 5710 if (cpu_has_secondary_exec_ctrls()) 5711 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); 5712 5713 pr_err("VMCS %p, last attempted VM-entry on CPU %d\n", 5714 vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu); 5715 pr_err("*** Guest State ***\n"); 5716 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 5717 vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), 5718 vmcs_readl(CR0_GUEST_HOST_MASK)); 5719 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 5720 cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); 5721 pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); 5722 if (cpu_has_vmx_ept()) { 5723 pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", 5724 vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1)); 5725 pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", 5726 vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3)); 5727 } 5728 pr_err("RSP = 0x%016lx RIP = 0x%016lx\n", 5729 vmcs_readl(GUEST_RSP), 
vmcs_readl(GUEST_RIP)); 5730 pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", 5731 vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7)); 5732 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 5733 vmcs_readl(GUEST_SYSENTER_ESP), 5734 vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); 5735 vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); 5736 vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); 5737 vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); 5738 vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); 5739 vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); 5740 vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); 5741 vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); 5742 vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); 5743 vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); 5744 vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); 5745 efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER); 5746 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER) 5747 pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER)); 5748 else if (efer_slot >= 0) 5749 pr_err("EFER= 0x%016llx (autoload)\n", 5750 vmx->msr_autoload.guest.val[efer_slot].value); 5751 else if (vmentry_ctl & VM_ENTRY_IA32E_MODE) 5752 pr_err("EFER= 0x%016llx (effective)\n", 5753 vcpu->arch.efer | (EFER_LMA | EFER_LME)); 5754 else 5755 pr_err("EFER= 0x%016llx (effective)\n", 5756 vcpu->arch.efer & ~(EFER_LMA | EFER_LME)); 5757 if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT) 5758 pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT)); 5759 pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", 5760 vmcs_read64(GUEST_IA32_DEBUGCTL), 5761 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); 5762 if (cpu_has_load_perf_global_ctrl() && 5763 vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) 5764 pr_err("PerfGlobCtl = 0x%016llx\n", 5765 vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); 5766 if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) 5767 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); 5768 pr_err("Interruptibility = %08x ActivityState = %08x\n", 5769 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), 5770 vmcs_read32(GUEST_ACTIVITY_STATE)); 5771 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 5772 pr_err("InterruptStatus = %04x\n", 5773 vmcs_read16(GUEST_INTR_STATUS)); 5774 if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0) 5775 vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest); 5776 if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0) 5777 vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest); 5778 5779 pr_err("*** Host State ***\n"); 5780 pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", 5781 vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); 5782 pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", 5783 vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), 5784 vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), 5785 vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), 5786 vmcs_read16(HOST_TR_SELECTOR)); 5787 pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", 5788 vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), 5789 vmcs_readl(HOST_TR_BASE)); 5790 pr_err("GDTBase=%016lx IDTBase=%016lx\n", 5791 vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); 5792 pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", 5793 vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), 5794 vmcs_readl(HOST_CR4)); 5795 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 5796 vmcs_readl(HOST_IA32_SYSENTER_ESP), 5797 vmcs_read32(HOST_IA32_SYSENTER_CS), 5798 vmcs_readl(HOST_IA32_SYSENTER_EIP)); 5799 if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER) 5800 pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER)); 5801 if 
(vmexit_ctl & VM_EXIT_LOAD_IA32_PAT) 5802 pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT)); 5803 if (cpu_has_load_perf_global_ctrl() && 5804 vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 5805 pr_err("PerfGlobCtl = 0x%016llx\n", 5806 vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); 5807 if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0) 5808 vmx_dump_msrs("host autoload", &vmx->msr_autoload.host); 5809 5810 pr_err("*** Control State ***\n"); 5811 pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", 5812 pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); 5813 pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); 5814 pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", 5815 vmcs_read32(EXCEPTION_BITMAP), 5816 vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), 5817 vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); 5818 pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", 5819 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 5820 vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), 5821 vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); 5822 pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", 5823 vmcs_read32(VM_EXIT_INTR_INFO), 5824 vmcs_read32(VM_EXIT_INTR_ERROR_CODE), 5825 vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); 5826 pr_err(" reason=%08x qualification=%016lx\n", 5827 vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION)); 5828 pr_err("IDTVectoring: info=%08x errcode=%08x\n", 5829 vmcs_read32(IDT_VECTORING_INFO_FIELD), 5830 vmcs_read32(IDT_VECTORING_ERROR_CODE)); 5831 pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET)); 5832 if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING) 5833 pr_err("TSC Multiplier = 0x%016llx\n", 5834 vmcs_read64(TSC_MULTIPLIER)); 5835 if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) { 5836 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) { 5837 u16 status = vmcs_read16(GUEST_INTR_STATUS); 5838 pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff); 5839 } 5840 pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD)); 5841 if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) 5842 pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR)); 5843 pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR)); 5844 } 5845 if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR) 5846 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV)); 5847 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)) 5848 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER)); 5849 if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) 5850 pr_err("PLE Gap=%08x Window=%08x\n", 5851 vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW)); 5852 if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID) 5853 pr_err("Virtual processor ID = 0x%04x\n", 5854 vmcs_read16(VIRTUAL_PROCESSOR_ID)); 5855 } 5856 5857 /* 5858 * The guest has exited. See if we can fix it or if we need userspace 5859 * assistance. 5860 */ 5861 static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) 5862 { 5863 struct vcpu_vmx *vmx = to_vmx(vcpu); 5864 union vmx_exit_reason exit_reason = vmx->exit_reason; 5865 u32 vectoring_info = vmx->idt_vectoring_info; 5866 u16 exit_handler_index; 5867 5868 /* 5869 * Flush logged GPAs PML buffer, this will make dirty_bitmap more 5870 * updated. 
Another good is, in kvm_vm_ioctl_get_dirty_log, before 5871 * querying dirty_bitmap, we only need to kick all vcpus out of guest 5872 * mode as if vcpus is in root mode, the PML buffer must has been 5873 * flushed already. Note, PML is never enabled in hardware while 5874 * running L2. 5875 */ 5876 if (enable_pml && !is_guest_mode(vcpu)) 5877 vmx_flush_pml_buffer(vcpu); 5878 5879 /* 5880 * We should never reach this point with a pending nested VM-Enter, and 5881 * more specifically emulation of L2 due to invalid guest state (see 5882 * below) should never happen as that means we incorrectly allowed a 5883 * nested VM-Enter with an invalid vmcs12. 5884 */ 5885 if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm)) 5886 return -EIO; 5887 5888 /* If guest state is invalid, start emulating */ 5889 if (vmx->emulation_required) 5890 return handle_invalid_guest_state(vcpu); 5891 5892 if (is_guest_mode(vcpu)) { 5893 /* 5894 * PML is never enabled when running L2, bail immediately if a 5895 * PML full exit occurs as something is horribly wrong. 5896 */ 5897 if (exit_reason.basic == EXIT_REASON_PML_FULL) 5898 goto unexpected_vmexit; 5899 5900 /* 5901 * The host physical addresses of some pages of guest memory 5902 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC 5903 * Page). The CPU may write to these pages via their host 5904 * physical address while L2 is running, bypassing any 5905 * address-translation-based dirty tracking (e.g. EPT write 5906 * protection). 5907 * 5908 * Mark them dirty on every exit from L2 to prevent them from 5909 * getting out of sync with dirty tracking. 5910 */ 5911 nested_mark_vmcs12_pages_dirty(vcpu); 5912 5913 if (nested_vmx_reflect_vmexit(vcpu)) 5914 return 1; 5915 } 5916 5917 if (exit_reason.failed_vmentry) { 5918 dump_vmcs(vcpu); 5919 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; 5920 vcpu->run->fail_entry.hardware_entry_failure_reason 5921 = exit_reason.full; 5922 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; 5923 return 0; 5924 } 5925 5926 if (unlikely(vmx->fail)) { 5927 dump_vmcs(vcpu); 5928 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; 5929 vcpu->run->fail_entry.hardware_entry_failure_reason 5930 = vmcs_read32(VM_INSTRUCTION_ERROR); 5931 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; 5932 return 0; 5933 } 5934 5935 /* 5936 * Note: 5937 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it caused by 5938 * delivery event since it indicates guest is accessing MMIO. 5939 * The vm-exit can be triggered again after return to guest that 5940 * will cause infinite loop. 
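 *
 * Instead, a vectored EPT misconfig is reported to userspace below as
 * KVM_INTERNAL_ERROR_DELIVERY_EV, together with the original
 * IDT-vectoring information, exit reason and exit qualification, so the
 * bad MMIO access can be diagnosed rather than retried forever.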
5941 */ 5942 if ((vectoring_info & VECTORING_INFO_VALID_MASK) && 5943 (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI && 5944 exit_reason.basic != EXIT_REASON_EPT_VIOLATION && 5945 exit_reason.basic != EXIT_REASON_PML_FULL && 5946 exit_reason.basic != EXIT_REASON_APIC_ACCESS && 5947 exit_reason.basic != EXIT_REASON_TASK_SWITCH)) { 5948 int ndata = 3; 5949 5950 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 5951 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; 5952 vcpu->run->internal.data[0] = vectoring_info; 5953 vcpu->run->internal.data[1] = exit_reason.full; 5954 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification; 5955 if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) { 5956 vcpu->run->internal.data[ndata++] = 5957 vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5958 } 5959 vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu; 5960 vcpu->run->internal.ndata = ndata; 5961 return 0; 5962 } 5963 5964 if (unlikely(!enable_vnmi && 5965 vmx->loaded_vmcs->soft_vnmi_blocked)) { 5966 if (!vmx_interrupt_blocked(vcpu)) { 5967 vmx->loaded_vmcs->soft_vnmi_blocked = 0; 5968 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && 5969 vcpu->arch.nmi_pending) { 5970 /* 5971 * This CPU don't support us in finding the end of an 5972 * NMI-blocked window if the guest runs with IRQs 5973 * disabled. So we pull the trigger after 1 s of 5974 * futile waiting, but inform the user about this. 5975 */ 5976 printk(KERN_WARNING "%s: Breaking out of NMI-blocked " 5977 "state on VCPU %d after 1 s timeout\n", 5978 __func__, vcpu->vcpu_id); 5979 vmx->loaded_vmcs->soft_vnmi_blocked = 0; 5980 } 5981 } 5982 5983 if (exit_fastpath != EXIT_FASTPATH_NONE) 5984 return 1; 5985 5986 if (exit_reason.basic >= kvm_vmx_max_exit_handlers) 5987 goto unexpected_vmexit; 5988 #ifdef CONFIG_RETPOLINE 5989 if (exit_reason.basic == EXIT_REASON_MSR_WRITE) 5990 return kvm_emulate_wrmsr(vcpu); 5991 else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER) 5992 return handle_preemption_timer(vcpu); 5993 else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW) 5994 return handle_interrupt_window(vcpu); 5995 else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) 5996 return handle_external_interrupt(vcpu); 5997 else if (exit_reason.basic == EXIT_REASON_HLT) 5998 return kvm_emulate_halt(vcpu); 5999 else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) 6000 return handle_ept_misconfig(vcpu); 6001 #endif 6002 6003 exit_handler_index = array_index_nospec((u16)exit_reason.basic, 6004 kvm_vmx_max_exit_handlers); 6005 if (!kvm_vmx_exit_handlers[exit_handler_index]) 6006 goto unexpected_vmexit; 6007 6008 return kvm_vmx_exit_handlers[exit_handler_index](vcpu); 6009 6010 unexpected_vmexit: 6011 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", 6012 exit_reason.full); 6013 dump_vmcs(vcpu); 6014 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 6015 vcpu->run->internal.suberror = 6016 KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; 6017 vcpu->run->internal.ndata = 2; 6018 vcpu->run->internal.data[0] = exit_reason.full; 6019 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; 6020 return 0; 6021 } 6022 6023 static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) 6024 { 6025 int ret = __vmx_handle_exit(vcpu, exit_fastpath); 6026 6027 /* 6028 * Exit to user space when bus lock detected to inform that there is 6029 * a bus lock in guest. 
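 *
 * Note that the handler for the underlying exit reason has already run
 * (__vmx_handle_exit() above); only a "resume guest" result (ret > 0) is
 * rewritten to KVM_EXIT_X86_BUS_LOCK, while an exit that already targets
 * userspace keeps its original exit reason and merely carries the
 * KVM_RUN_X86_BUS_LOCK flag.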
6030 */ 6031 if (to_vmx(vcpu)->exit_reason.bus_lock_detected) { 6032 if (ret > 0) 6033 vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; 6034 6035 vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; 6036 return 0; 6037 } 6038 return ret; 6039 } 6040 6041 /* 6042 * Software based L1D cache flush which is used when microcode providing 6043 * the cache control MSR is not loaded. 6044 * 6045 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to 6046 * flush it is required to read in 64 KiB because the replacement algorithm 6047 * is not exactly LRU. This could be sized at runtime via topology 6048 * information but as all relevant affected CPUs have 32KiB L1D cache size 6049 * there is no point in doing so. 6050 */ 6051 static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu) 6052 { 6053 int size = PAGE_SIZE << L1D_CACHE_ORDER; 6054 6055 /* 6056 * This code is only executed when the the flush mode is 'cond' or 6057 * 'always' 6058 */ 6059 if (static_branch_likely(&vmx_l1d_flush_cond)) { 6060 bool flush_l1d; 6061 6062 /* 6063 * Clear the per-vcpu flush bit, it gets set again 6064 * either from vcpu_run() or from one of the unsafe 6065 * VMEXIT handlers. 6066 */ 6067 flush_l1d = vcpu->arch.l1tf_flush_l1d; 6068 vcpu->arch.l1tf_flush_l1d = false; 6069 6070 /* 6071 * Clear the per-cpu flush bit, it gets set again from 6072 * the interrupt handlers. 6073 */ 6074 flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); 6075 kvm_clear_cpu_l1tf_flush_l1d(); 6076 6077 if (!flush_l1d) 6078 return; 6079 } 6080 6081 vcpu->stat.l1d_flush++; 6082 6083 if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { 6084 native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); 6085 return; 6086 } 6087 6088 asm volatile( 6089 /* First ensure the pages are in the TLB */ 6090 "xorl %%eax, %%eax\n" 6091 ".Lpopulate_tlb:\n\t" 6092 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" 6093 "addl $4096, %%eax\n\t" 6094 "cmpl %%eax, %[size]\n\t" 6095 "jne .Lpopulate_tlb\n\t" 6096 "xorl %%eax, %%eax\n\t" 6097 "cpuid\n\t" 6098 /* Now fill the cache */ 6099 "xorl %%eax, %%eax\n" 6100 ".Lfill_cache:\n" 6101 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" 6102 "addl $64, %%eax\n\t" 6103 "cmpl %%eax, %[size]\n\t" 6104 "jne .Lfill_cache\n\t" 6105 "lfence\n" 6106 :: [flush_pages] "r" (vmx_l1d_flush_pages), 6107 [size] "r" (size) 6108 : "eax", "ebx", "ecx", "edx"); 6109 } 6110 6111 static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) 6112 { 6113 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6114 int tpr_threshold; 6115 6116 if (is_guest_mode(vcpu) && 6117 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) 6118 return; 6119 6120 tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr; 6121 if (is_guest_mode(vcpu)) 6122 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold; 6123 else 6124 vmcs_write32(TPR_THRESHOLD, tpr_threshold); 6125 } 6126 6127 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) 6128 { 6129 struct vcpu_vmx *vmx = to_vmx(vcpu); 6130 u32 sec_exec_control; 6131 6132 if (!lapic_in_kernel(vcpu)) 6133 return; 6134 6135 if (!flexpriority_enabled && 6136 !cpu_has_vmx_virtualize_x2apic_mode()) 6137 return; 6138 6139 /* Postpone execution until vmcs01 is the current VMCS. 
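 * The change_vmcs01_virtual_apic_mode flag set below is consumed on the
 * next emulated VM-Exit from L2 to L1 (see nested.c), which re-invokes
 * this function once vmcs01 is loaded again.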
*/ 6140 if (is_guest_mode(vcpu)) { 6141 vmx->nested.change_vmcs01_virtual_apic_mode = true; 6142 return; 6143 } 6144 6145 sec_exec_control = secondary_exec_controls_get(vmx); 6146 sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 6147 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); 6148 6149 switch (kvm_get_apic_mode(vcpu)) { 6150 case LAPIC_MODE_INVALID: 6151 WARN_ONCE(true, "Invalid local APIC state"); 6152 break; 6153 case LAPIC_MODE_DISABLED: 6154 break; 6155 case LAPIC_MODE_XAPIC: 6156 if (flexpriority_enabled) { 6157 sec_exec_control |= 6158 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 6159 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 6160 6161 /* 6162 * Flush the TLB, reloading the APIC access page will 6163 * only do so if its physical address has changed, but 6164 * the guest may have inserted a non-APIC mapping into 6165 * the TLB while the APIC access page was disabled. 6166 */ 6167 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 6168 } 6169 break; 6170 case LAPIC_MODE_X2APIC: 6171 if (cpu_has_vmx_virtualize_x2apic_mode()) 6172 sec_exec_control |= 6173 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 6174 break; 6175 } 6176 secondary_exec_controls_set(vmx, sec_exec_control); 6177 6178 vmx_update_msr_bitmap_x2apic(vcpu); 6179 } 6180 6181 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu) 6182 { 6183 struct page *page; 6184 6185 /* Defer reload until vmcs01 is the current VMCS. */ 6186 if (is_guest_mode(vcpu)) { 6187 to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true; 6188 return; 6189 } 6190 6191 if (!(secondary_exec_controls_get(to_vmx(vcpu)) & 6192 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 6193 return; 6194 6195 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 6196 if (is_error_page(page)) 6197 return; 6198 6199 vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(page)); 6200 vmx_flush_tlb_current(vcpu); 6201 6202 /* 6203 * Do not pin apic access page in memory, the MMU notifier 6204 * will call us again if it is migrated or swapped out. 6205 */ 6206 put_page(page); 6207 } 6208 6209 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) 6210 { 6211 u16 status; 6212 u8 old; 6213 6214 if (max_isr == -1) 6215 max_isr = 0; 6216 6217 status = vmcs_read16(GUEST_INTR_STATUS); 6218 old = status >> 8; 6219 if (max_isr != old) { 6220 status &= 0xff; 6221 status |= max_isr << 8; 6222 vmcs_write16(GUEST_INTR_STATUS, status); 6223 } 6224 } 6225 6226 static void vmx_set_rvi(int vector) 6227 { 6228 u16 status; 6229 u8 old; 6230 6231 if (vector == -1) 6232 vector = 0; 6233 6234 status = vmcs_read16(GUEST_INTR_STATUS); 6235 old = (u8)status & 0xff; 6236 if ((u8)vector != old) { 6237 status &= ~0xff; 6238 status |= (u8)vector; 6239 vmcs_write16(GUEST_INTR_STATUS, status); 6240 } 6241 } 6242 6243 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) 6244 { 6245 /* 6246 * When running L2, updating RVI is only relevant when 6247 * vmcs12 virtual-interrupt-delivery enabled. 6248 * However, it can be enabled only when L1 also 6249 * intercepts external-interrupts and in that case 6250 * we should not update vmcs02 RVI but instead intercept 6251 * interrupt. Therefore, do nothing when running L2. 
6252 */ 6253 if (!is_guest_mode(vcpu)) 6254 vmx_set_rvi(max_irr); 6255 } 6256 6257 static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) 6258 { 6259 struct vcpu_vmx *vmx = to_vmx(vcpu); 6260 int max_irr; 6261 bool got_posted_interrupt; 6262 6263 if (KVM_BUG_ON(!enable_apicv, vcpu->kvm)) 6264 return -EIO; 6265 6266 if (pi_test_on(&vmx->pi_desc)) { 6267 pi_clear_on(&vmx->pi_desc); 6268 /* 6269 * IOMMU can write to PID.ON, so the barrier matters even on UP. 6270 * But on x86 this is just a compiler barrier anyway. 6271 */ 6272 smp_mb__after_atomic(); 6273 got_posted_interrupt = 6274 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); 6275 } else { 6276 max_irr = kvm_lapic_find_highest_irr(vcpu); 6277 got_posted_interrupt = false; 6278 } 6279 6280 /* 6281 * Newly recognized interrupts are injected via either virtual interrupt 6282 * delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is 6283 * disabled in two cases: 6284 * 6285 * 1) If L2 is running and the vCPU has a new pending interrupt. If L1 6286 * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a 6287 * VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected 6288 * into L2, but KVM doesn't use virtual interrupt delivery to inject 6289 * interrupts into L2, and so KVM_REQ_EVENT is again needed. 6290 * 6291 * 2) If APICv is disabled for this vCPU, assigned devices may still 6292 * attempt to post interrupts. The posted interrupt vector will cause 6293 * a VM-Exit and the subsequent entry will call sync_pir_to_irr. 6294 */ 6295 if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu)) 6296 vmx_set_rvi(max_irr); 6297 else if (got_posted_interrupt) 6298 kvm_make_request(KVM_REQ_EVENT, vcpu); 6299 6300 return max_irr; 6301 } 6302 6303 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) 6304 { 6305 if (!kvm_vcpu_apicv_active(vcpu)) 6306 return; 6307 6308 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); 6309 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); 6310 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); 6311 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); 6312 } 6313 6314 static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) 6315 { 6316 struct vcpu_vmx *vmx = to_vmx(vcpu); 6317 6318 pi_clear_on(&vmx->pi_desc); 6319 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); 6320 } 6321 6322 void vmx_do_interrupt_nmi_irqoff(unsigned long entry); 6323 6324 static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, 6325 unsigned long entry) 6326 { 6327 kvm_before_interrupt(vcpu); 6328 vmx_do_interrupt_nmi_irqoff(entry); 6329 kvm_after_interrupt(vcpu); 6330 } 6331 6332 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx) 6333 { 6334 const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist; 6335 u32 intr_info = vmx_get_intr_info(&vmx->vcpu); 6336 6337 /* if exit due to PF check for async PF */ 6338 if (is_page_fault(intr_info)) 6339 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags(); 6340 /* Handle machine checks before interrupts are enabled */ 6341 else if (is_machine_check(intr_info)) 6342 kvm_machine_check(); 6343 /* We need to handle NMIs before interrupts are enabled */ 6344 else if (is_nmi(intr_info)) 6345 handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry); 6346 } 6347 6348 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu) 6349 { 6350 u32 intr_info = vmx_get_intr_info(vcpu); 6351 unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK; 6352 gate_desc *desc = (gate_desc *)host_idt_base + vector; 6353 
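	/*
	 * The CPU acknowledged the interrupt on VM-Exit ("acknowledge
	 * interrupt on exit") but did not deliver it through the IDT, so
	 * re-dispatch it to the host handler via its IDT entry.
	 */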
6354 if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm, 6355 "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info)) 6356 return; 6357 6358 handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc)); 6359 } 6360 6361 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu) 6362 { 6363 struct vcpu_vmx *vmx = to_vmx(vcpu); 6364 6365 if (vmx->emulation_required) 6366 return; 6367 6368 if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) 6369 handle_external_interrupt_irqoff(vcpu); 6370 else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI) 6371 handle_exception_nmi_irqoff(vmx); 6372 } 6373 6374 /* 6375 * The kvm parameter can be NULL (module initialization, or invocation before 6376 * VM creation). Be sure to check the kvm parameter before using it. 6377 */ 6378 static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index) 6379 { 6380 switch (index) { 6381 case MSR_IA32_SMBASE: 6382 /* 6383 * We cannot do SMM unless we can run the guest in big 6384 * real mode. 6385 */ 6386 return enable_unrestricted_guest || emulate_invalid_guest_state; 6387 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 6388 return nested; 6389 case MSR_AMD64_VIRT_SPEC_CTRL: 6390 case MSR_AMD64_TSC_RATIO: 6391 /* This is AMD only. */ 6392 return false; 6393 default: 6394 return true; 6395 } 6396 } 6397 6398 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) 6399 { 6400 u32 exit_intr_info; 6401 bool unblock_nmi; 6402 u8 vector; 6403 bool idtv_info_valid; 6404 6405 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; 6406 6407 if (enable_vnmi) { 6408 if (vmx->loaded_vmcs->nmi_known_unmasked) 6409 return; 6410 6411 exit_intr_info = vmx_get_intr_info(&vmx->vcpu); 6412 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; 6413 vector = exit_intr_info & INTR_INFO_VECTOR_MASK; 6414 /* 6415 * SDM 3: 27.7.1.2 (September 2008) 6416 * Re-set bit "block by NMI" before VM entry if vmexit caused by 6417 * a guest IRET fault. 6418 * SDM 3: 23.2.2 (September 2008) 6419 * Bit 12 is undefined in any of the following cases: 6420 * If the VM exit sets the valid bit in the IDT-vectoring 6421 * information field. 6422 * If the VM exit is due to a double fault. 
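 *
 * In those cases the code below falls back to re-reading
 * GUEST_INTERRUPTIBILITY_INFO instead of trusting the UNBLOCK_NMI bit
 * (bit 12) of the exit interruption information.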
6423 */ 6424 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && 6425 vector != DF_VECTOR && !idtv_info_valid) 6426 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 6427 GUEST_INTR_STATE_NMI); 6428 else 6429 vmx->loaded_vmcs->nmi_known_unmasked = 6430 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) 6431 & GUEST_INTR_STATE_NMI); 6432 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) 6433 vmx->loaded_vmcs->vnmi_blocked_time += 6434 ktime_to_ns(ktime_sub(ktime_get(), 6435 vmx->loaded_vmcs->entry_time)); 6436 } 6437 6438 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, 6439 u32 idt_vectoring_info, 6440 int instr_len_field, 6441 int error_code_field) 6442 { 6443 u8 vector; 6444 int type; 6445 bool idtv_info_valid; 6446 6447 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; 6448 6449 vcpu->arch.nmi_injected = false; 6450 kvm_clear_exception_queue(vcpu); 6451 kvm_clear_interrupt_queue(vcpu); 6452 6453 if (!idtv_info_valid) 6454 return; 6455 6456 kvm_make_request(KVM_REQ_EVENT, vcpu); 6457 6458 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; 6459 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; 6460 6461 switch (type) { 6462 case INTR_TYPE_NMI_INTR: 6463 vcpu->arch.nmi_injected = true; 6464 /* 6465 * SDM 3: 27.7.1.2 (September 2008) 6466 * Clear bit "block by NMI" before VM entry if a NMI 6467 * delivery faulted. 6468 */ 6469 vmx_set_nmi_mask(vcpu, false); 6470 break; 6471 case INTR_TYPE_SOFT_EXCEPTION: 6472 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 6473 fallthrough; 6474 case INTR_TYPE_HARD_EXCEPTION: 6475 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { 6476 u32 err = vmcs_read32(error_code_field); 6477 kvm_requeue_exception_e(vcpu, vector, err); 6478 } else 6479 kvm_requeue_exception(vcpu, vector); 6480 break; 6481 case INTR_TYPE_SOFT_INTR: 6482 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 6483 fallthrough; 6484 case INTR_TYPE_EXT_INTR: 6485 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); 6486 break; 6487 default: 6488 break; 6489 } 6490 } 6491 6492 static void vmx_complete_interrupts(struct vcpu_vmx *vmx) 6493 { 6494 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, 6495 VM_EXIT_INSTRUCTION_LEN, 6496 IDT_VECTORING_ERROR_CODE); 6497 } 6498 6499 static void vmx_cancel_injection(struct kvm_vcpu *vcpu) 6500 { 6501 __vmx_complete_interrupts(vcpu, 6502 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 6503 VM_ENTRY_INSTRUCTION_LEN, 6504 VM_ENTRY_EXCEPTION_ERROR_CODE); 6505 6506 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 6507 } 6508 6509 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) 6510 { 6511 int i, nr_msrs; 6512 struct perf_guest_switch_msr *msrs; 6513 6514 /* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. 
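 * Check the returned pointer before consuming nr_msrs.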
*/ 6515 msrs = perf_guest_get_msrs(&nr_msrs); 6516 if (!msrs) 6517 return; 6518 6519 for (i = 0; i < nr_msrs; i++) 6520 if (msrs[i].host == msrs[i].guest) 6521 clear_atomic_switch_msr(vmx, msrs[i].msr); 6522 else 6523 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, 6524 msrs[i].host, false); 6525 } 6526 6527 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) 6528 { 6529 struct vcpu_vmx *vmx = to_vmx(vcpu); 6530 u64 tscl; 6531 u32 delta_tsc; 6532 6533 if (vmx->req_immediate_exit) { 6534 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0); 6535 vmx->loaded_vmcs->hv_timer_soft_disabled = false; 6536 } else if (vmx->hv_deadline_tsc != -1) { 6537 tscl = rdtsc(); 6538 if (vmx->hv_deadline_tsc > tscl) 6539 /* set_hv_timer ensures the delta fits in 32-bits */ 6540 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> 6541 cpu_preemption_timer_multi); 6542 else 6543 delta_tsc = 0; 6544 6545 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); 6546 vmx->loaded_vmcs->hv_timer_soft_disabled = false; 6547 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) { 6548 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1); 6549 vmx->loaded_vmcs->hv_timer_soft_disabled = true; 6550 } 6551 } 6552 6553 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) 6554 { 6555 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) { 6556 vmx->loaded_vmcs->host_state.rsp = host_rsp; 6557 vmcs_writel(HOST_RSP, host_rsp); 6558 } 6559 } 6560 6561 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) 6562 { 6563 switch (to_vmx(vcpu)->exit_reason.basic) { 6564 case EXIT_REASON_MSR_WRITE: 6565 return handle_fastpath_set_msr_irqoff(vcpu); 6566 case EXIT_REASON_PREEMPTION_TIMER: 6567 return handle_fastpath_preemption_timer(vcpu); 6568 default: 6569 return EXIT_FASTPATH_NONE; 6570 } 6571 } 6572 6573 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, 6574 struct vcpu_vmx *vmx) 6575 { 6576 kvm_guest_enter_irqoff(); 6577 6578 /* L1D Flush includes CPU buffer clear to mitigate MDS */ 6579 if (static_branch_unlikely(&vmx_l1d_should_flush)) 6580 vmx_l1d_flush(vcpu); 6581 else if (static_branch_unlikely(&mds_user_clear)) 6582 mds_clear_cpu_buffers(); 6583 6584 if (vcpu->arch.cr2 != native_read_cr2()) 6585 native_write_cr2(vcpu->arch.cr2); 6586 6587 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 6588 vmx->loaded_vmcs->launched); 6589 6590 vcpu->arch.cr2 = native_read_cr2(); 6591 6592 kvm_guest_exit_irqoff(); 6593 } 6594 6595 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) 6596 { 6597 struct vcpu_vmx *vmx = to_vmx(vcpu); 6598 unsigned long cr3, cr4; 6599 6600 /* Record the guest's net vcpu time for enforced NMI injections. */ 6601 if (unlikely(!enable_vnmi && 6602 vmx->loaded_vmcs->soft_vnmi_blocked)) 6603 vmx->loaded_vmcs->entry_time = ktime_get(); 6604 6605 /* 6606 * Don't enter VMX if guest state is invalid, let the exit handler 6607 * start emulation until we arrive back to a valid state. Synthesize a 6608 * consistency check VM-Exit due to invalid guest state and bail. 
6609 */ 6610 if (unlikely(vmx->emulation_required)) { 6611 6612 /* We don't emulate invalid state of a nested guest */ 6613 vmx->fail = is_guest_mode(vcpu); 6614 6615 vmx->exit_reason.full = EXIT_REASON_INVALID_STATE; 6616 vmx->exit_reason.failed_vmentry = 1; 6617 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1); 6618 vmx->exit_qualification = ENTRY_FAIL_DEFAULT; 6619 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2); 6620 vmx->exit_intr_info = 0; 6621 return EXIT_FASTPATH_NONE; 6622 } 6623 6624 trace_kvm_entry(vcpu); 6625 6626 if (vmx->ple_window_dirty) { 6627 vmx->ple_window_dirty = false; 6628 vmcs_write32(PLE_WINDOW, vmx->ple_window); 6629 } 6630 6631 /* 6632 * We did this in prepare_switch_to_guest, because it needs to 6633 * be within srcu_read_lock. 6634 */ 6635 WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync); 6636 6637 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP)) 6638 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); 6639 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP)) 6640 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); 6641 6642 cr3 = __get_current_cr3_fast(); 6643 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 6644 vmcs_writel(HOST_CR3, cr3); 6645 vmx->loaded_vmcs->host_state.cr3 = cr3; 6646 } 6647 6648 cr4 = cr4_read_shadow(); 6649 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 6650 vmcs_writel(HOST_CR4, cr4); 6651 vmx->loaded_vmcs->host_state.cr4 = cr4; 6652 } 6653 6654 /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */ 6655 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) 6656 set_debugreg(vcpu->arch.dr6, 6); 6657 6658 /* When single-stepping over STI and MOV SS, we must clear the 6659 * corresponding interruptibility bits in the guest state. Otherwise 6660 * vmentry fails as it then expects bit 14 (BS) in pending debug 6661 * exceptions being set, but that's not correct for the guest debugging 6662 * case. */ 6663 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 6664 vmx_set_interrupt_shadow(vcpu, 0); 6665 6666 kvm_load_guest_xsave_state(vcpu); 6667 6668 pt_guest_enter(vmx); 6669 6670 atomic_switch_perf_msrs(vmx); 6671 if (intel_pmu_lbr_is_enabled(vcpu)) 6672 vmx_passthrough_lbr_msrs(vcpu); 6673 6674 if (enable_preemption_timer) 6675 vmx_update_hv_timer(vcpu); 6676 6677 kvm_wait_lapic_expire(vcpu); 6678 6679 /* 6680 * If this vCPU has touched SPEC_CTRL, restore the guest's value if 6681 * it's non-zero. Since vmentry is serialising on affected CPUs, there 6682 * is no need to worry about the conditional branch over the wrmsr 6683 * being speculatively taken. 6684 */ 6685 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); 6686 6687 /* The actual VMENTER/EXIT is in the .noinstr.text section. */ 6688 vmx_vcpu_enter_exit(vcpu, vmx); 6689 6690 /* 6691 * We do not use IBRS in the kernel. If this vCPU has used the 6692 * SPEC_CTRL MSR it may have left it on; save the value and 6693 * turn it off. This is much more efficient than blindly adding 6694 * it to the atomic save/restore list. Especially as the former 6695 * (Saving guest MSRs on vmexit) doesn't even exist in KVM. 6696 * 6697 * For non-nested case: 6698 * If the L01 MSR bitmap does not intercept the MSR, then we need to 6699 * save it. 6700 * 6701 * For nested case: 6702 * If the L02 MSR bitmap does not intercept the MSR, then we need to 6703 * save it. 
6704 */ 6705 if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))) 6706 vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); 6707 6708 x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); 6709 6710 /* All fields are clean at this point */ 6711 if (static_branch_unlikely(&enable_evmcs)) { 6712 current_evmcs->hv_clean_fields |= 6713 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 6714 6715 current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu); 6716 } 6717 6718 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ 6719 if (vmx->host_debugctlmsr) 6720 update_debugctlmsr(vmx->host_debugctlmsr); 6721 6722 #ifndef CONFIG_X86_64 6723 /* 6724 * The sysexit path does not restore ds/es, so we must set them to 6725 * a reasonable value ourselves. 6726 * 6727 * We can't defer this to vmx_prepare_switch_to_host() since that 6728 * function may be executed in interrupt context, which saves and 6729 * restore segments around it, nullifying its effect. 6730 */ 6731 loadsegment(ds, __USER_DS); 6732 loadsegment(es, __USER_DS); 6733 #endif 6734 6735 vmx_register_cache_reset(vcpu); 6736 6737 pt_guest_exit(vmx); 6738 6739 kvm_load_host_xsave_state(vcpu); 6740 6741 if (is_guest_mode(vcpu)) { 6742 /* 6743 * Track VMLAUNCH/VMRESUME that have made past guest state 6744 * checking. 6745 */ 6746 if (vmx->nested.nested_run_pending && 6747 !vmx->exit_reason.failed_vmentry) 6748 ++vcpu->stat.nested_run; 6749 6750 vmx->nested.nested_run_pending = 0; 6751 } 6752 6753 vmx->idt_vectoring_info = 0; 6754 6755 if (unlikely(vmx->fail)) { 6756 vmx->exit_reason.full = 0xdead; 6757 return EXIT_FASTPATH_NONE; 6758 } 6759 6760 vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON); 6761 if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY)) 6762 kvm_machine_check(); 6763 6764 if (likely(!vmx->exit_reason.failed_vmentry)) 6765 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 6766 6767 trace_kvm_exit(vcpu, KVM_ISA_VMX); 6768 6769 if (unlikely(vmx->exit_reason.failed_vmentry)) 6770 return EXIT_FASTPATH_NONE; 6771 6772 vmx->loaded_vmcs->launched = 1; 6773 6774 vmx_recover_nmi_blocking(vmx); 6775 vmx_complete_interrupts(vmx); 6776 6777 if (is_guest_mode(vcpu)) 6778 return EXIT_FASTPATH_NONE; 6779 6780 return vmx_exit_handlers_fastpath(vcpu); 6781 } 6782 6783 static void vmx_free_vcpu(struct kvm_vcpu *vcpu) 6784 { 6785 struct vcpu_vmx *vmx = to_vmx(vcpu); 6786 6787 if (enable_pml) 6788 vmx_destroy_pml_buffer(vmx); 6789 free_vpid(vmx->vpid); 6790 nested_vmx_free_vcpu(vcpu); 6791 free_loaded_vmcs(vmx->loaded_vmcs); 6792 } 6793 6794 static int vmx_create_vcpu(struct kvm_vcpu *vcpu) 6795 { 6796 struct vmx_uret_msr *tsx_ctrl; 6797 struct vcpu_vmx *vmx; 6798 int i, err; 6799 6800 BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0); 6801 vmx = to_vmx(vcpu); 6802 6803 err = -ENOMEM; 6804 6805 vmx->vpid = allocate_vpid(); 6806 6807 /* 6808 * If PML is turned on, failure on enabling PML just results in failure 6809 * of creating the vcpu, therefore we can simplify PML logic (by 6810 * avoiding dealing with cases, such as enabling PML partially on vcpus 6811 * for the guest), etc. 6812 */ 6813 if (enable_pml) { 6814 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 6815 if (!vmx->pml_pg) 6816 goto free_vpid; 6817 } 6818 6819 for (i = 0; i < kvm_nr_uret_msrs; ++i) 6820 vmx->guest_uret_msrs[i].mask = -1ull; 6821 if (boot_cpu_has(X86_FEATURE_RTM)) { 6822 /* 6823 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception. 
6824 * Keep the host value unchanged to avoid changing CPUID bits 6825 * under the host kernel's feet. 6826 */ 6827 tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL); 6828 if (tsx_ctrl) 6829 tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR; 6830 } 6831 6832 err = alloc_loaded_vmcs(&vmx->vmcs01); 6833 if (err < 0) 6834 goto free_pml; 6835 6836 /* 6837 * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a 6838 * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the 6839 * feature only for vmcs01, KVM currently isn't equipped to realize any 6840 * performance benefits from enabling it for vmcs02. 6841 */ 6842 if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) && 6843 (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { 6844 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs; 6845 6846 evmcs->hv_enlightenments_control.msr_bitmap = 1; 6847 } 6848 6849 /* The MSR bitmap starts with all ones */ 6850 bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS); 6851 bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS); 6852 6853 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R); 6854 #ifdef CONFIG_X86_64 6855 vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW); 6856 vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW); 6857 vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); 6858 #endif 6859 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); 6860 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); 6861 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); 6862 if (kvm_cstate_in_guest(vcpu->kvm)) { 6863 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R); 6864 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R); 6865 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R); 6866 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R); 6867 } 6868 6869 vmx->loaded_vmcs = &vmx->vmcs01; 6870 6871 if (cpu_need_virtualize_apic_accesses(vcpu)) { 6872 err = alloc_apic_access_page(vcpu->kvm); 6873 if (err) 6874 goto free_vmcs; 6875 } 6876 6877 if (enable_ept && !enable_unrestricted_guest) { 6878 err = init_rmode_identity_map(vcpu->kvm); 6879 if (err) 6880 goto free_vmcs; 6881 } 6882 6883 return 0; 6884 6885 free_vmcs: 6886 free_loaded_vmcs(vmx->loaded_vmcs); 6887 free_pml: 6888 vmx_destroy_pml_buffer(vmx); 6889 free_vpid: 6890 free_vpid(vmx->vpid); 6891 return err; 6892 } 6893 6894 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" 6895 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" 6896 6897 static int vmx_vm_init(struct kvm *kvm) 6898 { 6899 if (!ple_gap) 6900 kvm->arch.pause_in_guest = true; 6901 6902 if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { 6903 switch (l1tf_mitigation) { 6904 case L1TF_MITIGATION_OFF: 6905 case L1TF_MITIGATION_FLUSH_NOWARN: 6906 /* 'I explicitly don't care' is set */ 6907 break; 6908 case L1TF_MITIGATION_FLUSH: 6909 case L1TF_MITIGATION_FLUSH_NOSMT: 6910 case L1TF_MITIGATION_FULL: 6911 /* 6912 * Warn upon starting the first VM in a potentially 6913 * insecure environment. 
6914 */ 6915 if (sched_smt_active()) 6916 pr_warn_once(L1TF_MSG_SMT); 6917 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) 6918 pr_warn_once(L1TF_MSG_L1D); 6919 break; 6920 case L1TF_MITIGATION_FULL_FORCE: 6921 /* Flush is enforced */ 6922 break; 6923 } 6924 } 6925 return 0; 6926 } 6927 6928 static int __init vmx_check_processor_compat(void) 6929 { 6930 struct vmcs_config vmcs_conf; 6931 struct vmx_capability vmx_cap; 6932 6933 if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 6934 !this_cpu_has(X86_FEATURE_VMX)) { 6935 pr_err("kvm: VMX is disabled on CPU %d\n", smp_processor_id()); 6936 return -EIO; 6937 } 6938 6939 if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) 6940 return -EIO; 6941 if (nested) 6942 nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept); 6943 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { 6944 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", 6945 smp_processor_id()); 6946 return -EIO; 6947 } 6948 return 0; 6949 } 6950 6951 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) 6952 { 6953 u8 cache; 6954 u64 ipat = 0; 6955 6956 /* We wanted to honor guest CD/MTRR/PAT, but doing so could result in 6957 * memory aliases with conflicting memory types and sometimes MCEs. 6958 * We have to be careful as to what are honored and when. 6959 * 6960 * For MMIO, guest CD/MTRR are ignored. The EPT memory type is set to 6961 * UC. The effective memory type is UC or WC depending on guest PAT. 6962 * This was historically the source of MCEs and we want to be 6963 * conservative. 6964 * 6965 * When there is no need to deal with noncoherent DMA (e.g., no VT-d 6966 * or VT-d has snoop control), guest CD/MTRR/PAT are all ignored. The 6967 * EPT memory type is set to WB. The effective memory type is forced 6968 * WB. 6969 * 6970 * Otherwise, we trust guest. Guest CD/MTRR/PAT are all honored. The 6971 * EPT memory type is used to emulate guest CD/MTRR. 6972 */ 6973 6974 if (is_mmio) { 6975 cache = MTRR_TYPE_UNCACHABLE; 6976 goto exit; 6977 } 6978 6979 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { 6980 ipat = VMX_EPT_IPAT_BIT; 6981 cache = MTRR_TYPE_WRBACK; 6982 goto exit; 6983 } 6984 6985 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { 6986 ipat = VMX_EPT_IPAT_BIT; 6987 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 6988 cache = MTRR_TYPE_WRBACK; 6989 else 6990 cache = MTRR_TYPE_UNCACHABLE; 6991 goto exit; 6992 } 6993 6994 cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); 6995 6996 exit: 6997 return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat; 6998 } 6999 7000 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl) 7001 { 7002 /* 7003 * These bits in the secondary execution controls field 7004 * are dynamic, the others are mostly based on the hypervisor 7005 * architecture and the guest's CPUID. Do not touch the 7006 * dynamic bits. 7007 */ 7008 u32 mask = 7009 SECONDARY_EXEC_SHADOW_VMCS | 7010 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 7011 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 7012 SECONDARY_EXEC_DESC; 7013 7014 u32 cur_ctl = secondary_exec_controls_get(vmx); 7015 7016 secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask)); 7017 } 7018 7019 /* 7020 * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits 7021 * (indicating "allowed-1") if they are supported in the guest's CPUID. 
7022 */ 7023 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) 7024 { 7025 struct vcpu_vmx *vmx = to_vmx(vcpu); 7026 struct kvm_cpuid_entry2 *entry; 7027 7028 vmx->nested.msrs.cr0_fixed1 = 0xffffffff; 7029 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; 7030 7031 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \ 7032 if (entry && (entry->_reg & (_cpuid_mask))) \ 7033 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ 7034 } while (0) 7035 7036 entry = kvm_find_cpuid_entry(vcpu, 0x1, 0); 7037 cr4_fixed1_update(X86_CR4_VME, edx, feature_bit(VME)); 7038 cr4_fixed1_update(X86_CR4_PVI, edx, feature_bit(VME)); 7039 cr4_fixed1_update(X86_CR4_TSD, edx, feature_bit(TSC)); 7040 cr4_fixed1_update(X86_CR4_DE, edx, feature_bit(DE)); 7041 cr4_fixed1_update(X86_CR4_PSE, edx, feature_bit(PSE)); 7042 cr4_fixed1_update(X86_CR4_PAE, edx, feature_bit(PAE)); 7043 cr4_fixed1_update(X86_CR4_MCE, edx, feature_bit(MCE)); 7044 cr4_fixed1_update(X86_CR4_PGE, edx, feature_bit(PGE)); 7045 cr4_fixed1_update(X86_CR4_OSFXSR, edx, feature_bit(FXSR)); 7046 cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM)); 7047 cr4_fixed1_update(X86_CR4_VMXE, ecx, feature_bit(VMX)); 7048 cr4_fixed1_update(X86_CR4_SMXE, ecx, feature_bit(SMX)); 7049 cr4_fixed1_update(X86_CR4_PCIDE, ecx, feature_bit(PCID)); 7050 cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, feature_bit(XSAVE)); 7051 7052 entry = kvm_find_cpuid_entry(vcpu, 0x7, 0); 7053 cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, feature_bit(FSGSBASE)); 7054 cr4_fixed1_update(X86_CR4_SMEP, ebx, feature_bit(SMEP)); 7055 cr4_fixed1_update(X86_CR4_SMAP, ebx, feature_bit(SMAP)); 7056 cr4_fixed1_update(X86_CR4_PKE, ecx, feature_bit(PKU)); 7057 cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP)); 7058 cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57)); 7059 7060 #undef cr4_fixed1_update 7061 } 7062 7063 static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu) 7064 { 7065 struct vcpu_vmx *vmx = to_vmx(vcpu); 7066 7067 if (kvm_mpx_supported()) { 7068 bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX); 7069 7070 if (mpx_enabled) { 7071 vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; 7072 vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; 7073 } else { 7074 vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS; 7075 vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS; 7076 } 7077 } 7078 } 7079 7080 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) 7081 { 7082 struct vcpu_vmx *vmx = to_vmx(vcpu); 7083 struct kvm_cpuid_entry2 *best = NULL; 7084 int i; 7085 7086 for (i = 0; i < PT_CPUID_LEAVES; i++) { 7087 best = kvm_find_cpuid_entry(vcpu, 0x14, i); 7088 if (!best) 7089 return; 7090 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax; 7091 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx; 7092 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx; 7093 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx; 7094 } 7095 7096 /* Get the number of configurable Address Ranges for filtering */ 7097 vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps, 7098 PT_CAP_num_address_ranges); 7099 7100 /* Initialize and clear the no dependency bits */ 7101 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS | 7102 RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC | 7103 RTIT_CTL_BRANCH_EN); 7104 7105 /* 7106 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise 7107 * will inject an #GP 7108 */ 7109 if (intel_pt_validate_cap(vmx->pt_desc.caps, 
PT_CAP_cr3_filtering)) 7110 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN; 7111 7112 /* 7113 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and 7114 * PSBFreq can be set 7115 */ 7116 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc)) 7117 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC | 7118 RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ); 7119 7120 /* 7121 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn and MTCFreq can be set 7122 */ 7123 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc)) 7124 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN | 7125 RTIT_CTL_MTC_RANGE); 7126 7127 /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */ 7128 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite)) 7129 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW | 7130 RTIT_CTL_PTW_EN); 7131 7132 /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */ 7133 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace)) 7134 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN; 7135 7136 /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */ 7137 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output)) 7138 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA; 7139 7140 /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */ 7141 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys)) 7142 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; 7143 7144 /* unmask address range configure area */ 7145 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) 7146 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); 7147 } 7148 7149 static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) 7150 { 7151 struct vcpu_vmx *vmx = to_vmx(vcpu); 7152 7153 /* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */ 7154 vcpu->arch.xsaves_enabled = false; 7155 7156 vmx_setup_uret_msrs(vmx); 7157 7158 if (cpu_has_secondary_exec_ctrls()) 7159 vmcs_set_secondary_exec_control(vmx, 7160 vmx_secondary_exec_control(vmx)); 7161 7162 if (nested_vmx_allowed(vcpu)) 7163 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= 7164 FEAT_CTL_VMX_ENABLED_INSIDE_SMX | 7165 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; 7166 else 7167 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= 7168 ~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX | 7169 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX); 7170 7171 if (nested_vmx_allowed(vcpu)) { 7172 nested_vmx_cr_fixed1_bits_update(vcpu); 7173 nested_vmx_entry_exit_ctls_update(vcpu); 7174 } 7175 7176 if (boot_cpu_has(X86_FEATURE_INTEL_PT) && 7177 guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT)) 7178 update_intel_pt_cfg(vcpu); 7179 7180 if (boot_cpu_has(X86_FEATURE_RTM)) { 7181 struct vmx_uret_msr *msr; 7182 msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL); 7183 if (msr) { 7184 bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM); 7185 vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE); 7186 } 7187 } 7188 7189 set_cr4_guest_host_mask(vmx); 7190 7191 vmx_write_encls_bitmap(vcpu, NULL); 7192 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX)) 7193 vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED; 7194 else 7195 vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED; 7196 7197 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC)) 7198 vmx->msr_ia32_feature_control_valid_bits |= 7199 FEAT_CTL_SGX_LC_ENABLED; 7200 else 7201 vmx->msr_ia32_feature_control_valid_bits &= 7202 ~FEAT_CTL_SGX_LC_ENABLED; 7203 7204 /* Refresh #PF interception to account for MAXPHYADDR changes. 
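 * MAXPHYADDR only influences #PF interception when
 * allow_smaller_maxphyaddr is enabled, in which case #PF is intercepted
 * to handle accesses above the guest's MAXPHYADDR.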
*/ 7205 vmx_update_exception_bitmap(vcpu); 7206 } 7207 7208 static __init void vmx_set_cpu_caps(void) 7209 { 7210 kvm_set_cpu_caps(); 7211 7212 /* CPUID 0x1 */ 7213 if (nested) 7214 kvm_cpu_cap_set(X86_FEATURE_VMX); 7215 7216 /* CPUID 0x7 */ 7217 if (kvm_mpx_supported()) 7218 kvm_cpu_cap_check_and_set(X86_FEATURE_MPX); 7219 if (!cpu_has_vmx_invpcid()) 7220 kvm_cpu_cap_clear(X86_FEATURE_INVPCID); 7221 if (vmx_pt_mode_is_host_guest()) 7222 kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT); 7223 7224 if (!enable_sgx) { 7225 kvm_cpu_cap_clear(X86_FEATURE_SGX); 7226 kvm_cpu_cap_clear(X86_FEATURE_SGX_LC); 7227 kvm_cpu_cap_clear(X86_FEATURE_SGX1); 7228 kvm_cpu_cap_clear(X86_FEATURE_SGX2); 7229 } 7230 7231 if (vmx_umip_emulated()) 7232 kvm_cpu_cap_set(X86_FEATURE_UMIP); 7233 7234 /* CPUID 0xD.1 */ 7235 supported_xss = 0; 7236 if (!cpu_has_vmx_xsaves()) 7237 kvm_cpu_cap_clear(X86_FEATURE_XSAVES); 7238 7239 /* CPUID 0x80000001 and 0x7 (RDPID) */ 7240 if (!cpu_has_vmx_rdtscp()) { 7241 kvm_cpu_cap_clear(X86_FEATURE_RDTSCP); 7242 kvm_cpu_cap_clear(X86_FEATURE_RDPID); 7243 } 7244 7245 if (cpu_has_vmx_waitpkg()) 7246 kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); 7247 } 7248 7249 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) 7250 { 7251 to_vmx(vcpu)->req_immediate_exit = true; 7252 } 7253 7254 static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, 7255 struct x86_instruction_info *info) 7256 { 7257 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 7258 unsigned short port; 7259 bool intercept; 7260 int size; 7261 7262 if (info->intercept == x86_intercept_in || 7263 info->intercept == x86_intercept_ins) { 7264 port = info->src_val; 7265 size = info->dst_bytes; 7266 } else { 7267 port = info->dst_val; 7268 size = info->src_bytes; 7269 } 7270 7271 /* 7272 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction 7273 * VM-exits depend on the 'unconditional IO exiting' VM-execution 7274 * control. 7275 * 7276 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps. 7277 */ 7278 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 7279 intercept = nested_cpu_has(vmcs12, 7280 CPU_BASED_UNCOND_IO_EXITING); 7281 else 7282 intercept = nested_vmx_check_io_bitmaps(vcpu, port, size); 7283 7284 /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ 7285 return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; 7286 } 7287 7288 static int vmx_check_intercept(struct kvm_vcpu *vcpu, 7289 struct x86_instruction_info *info, 7290 enum x86_intercept_stage stage, 7291 struct x86_exception *exception) 7292 { 7293 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 7294 7295 switch (info->intercept) { 7296 /* 7297 * RDPID causes #UD if disabled through secondary execution controls. 7298 * Because it is marked as EmulateOnUD, we need to intercept it here. 7299 * Note, RDPID is hidden behind ENABLE_RDTSCP. 
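 * The SECONDARY_EXEC_ENABLE_RDTSCP control gates both instructions,
 * which is why that bit (and not a dedicated RDPID control) is checked
 * below.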
7300 */ 7301 case x86_intercept_rdpid: 7302 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) { 7303 exception->vector = UD_VECTOR; 7304 exception->error_code_valid = false; 7305 return X86EMUL_PROPAGATE_FAULT; 7306 } 7307 break; 7308 7309 case x86_intercept_in: 7310 case x86_intercept_ins: 7311 case x86_intercept_out: 7312 case x86_intercept_outs: 7313 return vmx_check_intercept_io(vcpu, info); 7314 7315 case x86_intercept_lgdt: 7316 case x86_intercept_lidt: 7317 case x86_intercept_lldt: 7318 case x86_intercept_ltr: 7319 case x86_intercept_sgdt: 7320 case x86_intercept_sidt: 7321 case x86_intercept_sldt: 7322 case x86_intercept_str: 7323 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC)) 7324 return X86EMUL_CONTINUE; 7325 7326 /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ 7327 break; 7328 7329 /* TODO: check more intercepts... */ 7330 default: 7331 break; 7332 } 7333 7334 return X86EMUL_UNHANDLEABLE; 7335 } 7336 7337 #ifdef CONFIG_X86_64 7338 /* (a << shift) / divisor, return 1 if overflow otherwise 0 */ 7339 static inline int u64_shl_div_u64(u64 a, unsigned int shift, 7340 u64 divisor, u64 *result) 7341 { 7342 u64 low = a << shift, high = a >> (64 - shift); 7343 7344 /* To avoid the overflow on divq */ 7345 if (high >= divisor) 7346 return 1; 7347 7348 /* Low hold the result, high hold rem which is discarded */ 7349 asm("divq %2\n\t" : "=a" (low), "=d" (high) : 7350 "rm" (divisor), "0" (low), "1" (high)); 7351 *result = low; 7352 7353 return 0; 7354 } 7355 7356 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc, 7357 bool *expired) 7358 { 7359 struct vcpu_vmx *vmx; 7360 u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles; 7361 struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer; 7362 7363 vmx = to_vmx(vcpu); 7364 tscl = rdtsc(); 7365 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); 7366 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; 7367 lapic_timer_advance_cycles = nsec_to_cycles(vcpu, 7368 ktimer->timer_advance_ns); 7369 7370 if (delta_tsc > lapic_timer_advance_cycles) 7371 delta_tsc -= lapic_timer_advance_cycles; 7372 else 7373 delta_tsc = 0; 7374 7375 /* Convert to host delta tsc if tsc scaling is enabled */ 7376 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && 7377 delta_tsc && u64_shl_div_u64(delta_tsc, 7378 kvm_tsc_scaling_ratio_frac_bits, 7379 vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc)) 7380 return -ERANGE; 7381 7382 /* 7383 * If the delta tsc can't fit in the 32 bit after the multi shift, 7384 * we can't use the preemption timer. 7385 * It's possible that it fits on later vmentries, but checking 7386 * on every vmentry is costly so we just use an hrtimer. 
7387 */ 7388 if (delta_tsc >> (cpu_preemption_timer_multi + 32)) 7389 return -ERANGE; 7390 7391 vmx->hv_deadline_tsc = tscl + delta_tsc; 7392 *expired = !delta_tsc; 7393 return 0; 7394 } 7395 7396 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) 7397 { 7398 to_vmx(vcpu)->hv_deadline_tsc = -1; 7399 } 7400 #endif 7401 7402 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) 7403 { 7404 if (!kvm_pause_in_guest(vcpu->kvm)) 7405 shrink_ple_window(vcpu); 7406 } 7407 7408 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu) 7409 { 7410 struct vcpu_vmx *vmx = to_vmx(vcpu); 7411 7412 if (is_guest_mode(vcpu)) { 7413 vmx->nested.update_vmcs01_cpu_dirty_logging = true; 7414 return; 7415 } 7416 7417 /* 7418 * Note, cpu_dirty_logging_count can be changed concurrent with this 7419 * code, but in that case another update request will be made and so 7420 * the guest will never run with a stale PML value. 7421 */ 7422 if (vcpu->kvm->arch.cpu_dirty_logging_count) 7423 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML); 7424 else 7425 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML); 7426 } 7427 7428 static int vmx_pre_block(struct kvm_vcpu *vcpu) 7429 { 7430 if (pi_pre_block(vcpu)) 7431 return 1; 7432 7433 if (kvm_lapic_hv_timer_in_use(vcpu)) 7434 kvm_lapic_switch_to_sw_timer(vcpu); 7435 7436 return 0; 7437 } 7438 7439 static void vmx_post_block(struct kvm_vcpu *vcpu) 7440 { 7441 if (kvm_x86_ops.set_hv_timer) 7442 kvm_lapic_switch_to_hv_timer(vcpu); 7443 7444 pi_post_block(vcpu); 7445 } 7446 7447 static void vmx_setup_mce(struct kvm_vcpu *vcpu) 7448 { 7449 if (vcpu->arch.mcg_cap & MCG_LMCE_P) 7450 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= 7451 FEAT_CTL_LMCE_ENABLED; 7452 else 7453 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= 7454 ~FEAT_CTL_LMCE_ENABLED; 7455 } 7456 7457 static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) 7458 { 7459 /* we need a nested vmexit to enter SMM, postpone if run is pending */ 7460 if (to_vmx(vcpu)->nested.nested_run_pending) 7461 return -EBUSY; 7462 return !is_smm(vcpu); 7463 } 7464 7465 static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate) 7466 { 7467 struct vcpu_vmx *vmx = to_vmx(vcpu); 7468 7469 vmx->nested.smm.guest_mode = is_guest_mode(vcpu); 7470 if (vmx->nested.smm.guest_mode) 7471 nested_vmx_vmexit(vcpu, -1, 0, 0); 7472 7473 vmx->nested.smm.vmxon = vmx->nested.vmxon; 7474 vmx->nested.vmxon = false; 7475 vmx_clear_hlt(vcpu); 7476 return 0; 7477 } 7478 7479 static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) 7480 { 7481 struct vcpu_vmx *vmx = to_vmx(vcpu); 7482 int ret; 7483 7484 if (vmx->nested.smm.vmxon) { 7485 vmx->nested.vmxon = true; 7486 vmx->nested.smm.vmxon = false; 7487 } 7488 7489 if (vmx->nested.smm.guest_mode) { 7490 ret = nested_vmx_enter_non_root_mode(vcpu, false); 7491 if (ret) 7492 return ret; 7493 7494 vmx->nested.smm.guest_mode = false; 7495 } 7496 return 0; 7497 } 7498 7499 static void vmx_enable_smi_window(struct kvm_vcpu *vcpu) 7500 { 7501 /* RSM will cause a vmexit anyway. 
*/ 7502 } 7503 7504 static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu) 7505 { 7506 return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu); 7507 } 7508 7509 static void vmx_migrate_timers(struct kvm_vcpu *vcpu) 7510 { 7511 if (is_guest_mode(vcpu)) { 7512 struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer; 7513 7514 if (hrtimer_try_to_cancel(timer) == 1) 7515 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); 7516 } 7517 } 7518 7519 static void hardware_unsetup(void) 7520 { 7521 kvm_set_posted_intr_wakeup_handler(NULL); 7522 7523 if (nested) 7524 nested_vmx_hardware_unsetup(); 7525 7526 free_kvm_area(); 7527 } 7528 7529 static bool vmx_check_apicv_inhibit_reasons(ulong bit) 7530 { 7531 ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) | 7532 BIT(APICV_INHIBIT_REASON_ABSENT) | 7533 BIT(APICV_INHIBIT_REASON_HYPERV) | 7534 BIT(APICV_INHIBIT_REASON_BLOCKIRQ); 7535 7536 return supported & BIT(bit); 7537 } 7538 7539 static struct kvm_x86_ops vmx_x86_ops __initdata = { 7540 .name = "kvm_intel", 7541 7542 .hardware_unsetup = hardware_unsetup, 7543 7544 .hardware_enable = hardware_enable, 7545 .hardware_disable = hardware_disable, 7546 .cpu_has_accelerated_tpr = report_flexpriority, 7547 .has_emulated_msr = vmx_has_emulated_msr, 7548 7549 .vm_size = sizeof(struct kvm_vmx), 7550 .vm_init = vmx_vm_init, 7551 7552 .vcpu_create = vmx_create_vcpu, 7553 .vcpu_free = vmx_free_vcpu, 7554 .vcpu_reset = vmx_vcpu_reset, 7555 7556 .prepare_guest_switch = vmx_prepare_switch_to_guest, 7557 .vcpu_load = vmx_vcpu_load, 7558 .vcpu_put = vmx_vcpu_put, 7559 7560 .update_exception_bitmap = vmx_update_exception_bitmap, 7561 .get_msr_feature = vmx_get_msr_feature, 7562 .get_msr = vmx_get_msr, 7563 .set_msr = vmx_set_msr, 7564 .get_segment_base = vmx_get_segment_base, 7565 .get_segment = vmx_get_segment, 7566 .set_segment = vmx_set_segment, 7567 .get_cpl = vmx_get_cpl, 7568 .get_cs_db_l_bits = vmx_get_cs_db_l_bits, 7569 .set_cr0 = vmx_set_cr0, 7570 .is_valid_cr4 = vmx_is_valid_cr4, 7571 .set_cr4 = vmx_set_cr4, 7572 .set_efer = vmx_set_efer, 7573 .get_idt = vmx_get_idt, 7574 .set_idt = vmx_set_idt, 7575 .get_gdt = vmx_get_gdt, 7576 .set_gdt = vmx_set_gdt, 7577 .set_dr7 = vmx_set_dr7, 7578 .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, 7579 .cache_reg = vmx_cache_reg, 7580 .get_rflags = vmx_get_rflags, 7581 .set_rflags = vmx_set_rflags, 7582 7583 .tlb_flush_all = vmx_flush_tlb_all, 7584 .tlb_flush_current = vmx_flush_tlb_current, 7585 .tlb_flush_gva = vmx_flush_tlb_gva, 7586 .tlb_flush_guest = vmx_flush_tlb_guest, 7587 7588 .run = vmx_vcpu_run, 7589 .handle_exit = vmx_handle_exit, 7590 .skip_emulated_instruction = vmx_skip_emulated_instruction, 7591 .update_emulated_instruction = vmx_update_emulated_instruction, 7592 .set_interrupt_shadow = vmx_set_interrupt_shadow, 7593 .get_interrupt_shadow = vmx_get_interrupt_shadow, 7594 .patch_hypercall = vmx_patch_hypercall, 7595 .set_irq = vmx_inject_irq, 7596 .set_nmi = vmx_inject_nmi, 7597 .queue_exception = vmx_queue_exception, 7598 .cancel_injection = vmx_cancel_injection, 7599 .interrupt_allowed = vmx_interrupt_allowed, 7600 .nmi_allowed = vmx_nmi_allowed, 7601 .get_nmi_mask = vmx_get_nmi_mask, 7602 .set_nmi_mask = vmx_set_nmi_mask, 7603 .enable_nmi_window = vmx_enable_nmi_window, 7604 .enable_irq_window = vmx_enable_irq_window, 7605 .update_cr8_intercept = vmx_update_cr8_intercept, 7606 .set_virtual_apic_mode = vmx_set_virtual_apic_mode, 7607 .set_apic_access_page_addr = vmx_set_apic_access_page_addr, 7608 .refresh_apicv_exec_ctrl = 
vmx_refresh_apicv_exec_ctrl, 7609 .load_eoi_exitmap = vmx_load_eoi_exitmap, 7610 .apicv_post_state_restore = vmx_apicv_post_state_restore, 7611 .check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons, 7612 .hwapic_irr_update = vmx_hwapic_irr_update, 7613 .hwapic_isr_update = vmx_hwapic_isr_update, 7614 .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, 7615 .sync_pir_to_irr = vmx_sync_pir_to_irr, 7616 .deliver_posted_interrupt = vmx_deliver_posted_interrupt, 7617 .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt, 7618 7619 .set_tss_addr = vmx_set_tss_addr, 7620 .set_identity_map_addr = vmx_set_identity_map_addr, 7621 .get_mt_mask = vmx_get_mt_mask, 7622 7623 .get_exit_info = vmx_get_exit_info, 7624 7625 .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid, 7626 7627 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, 7628 7629 .get_l2_tsc_offset = vmx_get_l2_tsc_offset, 7630 .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier, 7631 .write_tsc_offset = vmx_write_tsc_offset, 7632 .write_tsc_multiplier = vmx_write_tsc_multiplier, 7633 7634 .load_mmu_pgd = vmx_load_mmu_pgd, 7635 7636 .check_intercept = vmx_check_intercept, 7637 .handle_exit_irqoff = vmx_handle_exit_irqoff, 7638 7639 .request_immediate_exit = vmx_request_immediate_exit, 7640 7641 .sched_in = vmx_sched_in, 7642 7643 .cpu_dirty_log_size = PML_ENTITY_NUM, 7644 .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging, 7645 7646 .pre_block = vmx_pre_block, 7647 .post_block = vmx_post_block, 7648 7649 .pmu_ops = &intel_pmu_ops, 7650 .nested_ops = &vmx_nested_ops, 7651 7652 .update_pi_irte = pi_update_irte, 7653 .start_assignment = vmx_pi_start_assignment, 7654 7655 #ifdef CONFIG_X86_64 7656 .set_hv_timer = vmx_set_hv_timer, 7657 .cancel_hv_timer = vmx_cancel_hv_timer, 7658 #endif 7659 7660 .setup_mce = vmx_setup_mce, 7661 7662 .smi_allowed = vmx_smi_allowed, 7663 .enter_smm = vmx_enter_smm, 7664 .leave_smm = vmx_leave_smm, 7665 .enable_smi_window = vmx_enable_smi_window, 7666 7667 .can_emulate_instruction = vmx_can_emulate_instruction, 7668 .apic_init_signal_blocked = vmx_apic_init_signal_blocked, 7669 .migrate_timers = vmx_migrate_timers, 7670 7671 .msr_filter_changed = vmx_msr_filter_changed, 7672 .complete_emulated_msr = kvm_complete_insn_gp, 7673 7674 .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector, 7675 }; 7676 7677 static __init void vmx_setup_user_return_msrs(void) 7678 { 7679 7680 /* 7681 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm 7682 * will emulate SYSCALL in legacy mode if the vendor string in guest 7683 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To 7684 * support this emulation, MSR_STAR is included in the list for i386, 7685 * but is never loaded into hardware. MSR_CSTAR is also never loaded 7686 * into hardware and is here purely for emulation purposes. 
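 * The number of entries below must match MAX_NR_USER_RETURN_MSRS; the
 * BUILD_BUG_ON() after the list enforces that at compile time.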
7687 */ 7688 const u32 vmx_uret_msrs_list[] = { 7689 #ifdef CONFIG_X86_64 7690 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, 7691 #endif 7692 MSR_EFER, MSR_TSC_AUX, MSR_STAR, 7693 MSR_IA32_TSX_CTRL, 7694 }; 7695 int i; 7696 7697 BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS); 7698 7699 for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) 7700 kvm_add_user_return_msr(vmx_uret_msrs_list[i]); 7701 } 7702 7703 static __init int hardware_setup(void) 7704 { 7705 unsigned long host_bndcfgs; 7706 struct desc_ptr dt; 7707 int r, ept_lpage_level; 7708 7709 store_idt(&dt); 7710 host_idt_base = dt.address; 7711 7712 vmx_setup_user_return_msrs(); 7713 7714 if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0) 7715 return -EIO; 7716 7717 if (boot_cpu_has(X86_FEATURE_NX)) 7718 kvm_enable_efer_bits(EFER_NX); 7719 7720 if (boot_cpu_has(X86_FEATURE_MPX)) { 7721 rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs); 7722 WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost"); 7723 } 7724 7725 if (!cpu_has_vmx_mpx()) 7726 supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | 7727 XFEATURE_MASK_BNDCSR); 7728 7729 if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || 7730 !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) 7731 enable_vpid = 0; 7732 7733 if (!cpu_has_vmx_ept() || 7734 !cpu_has_vmx_ept_4levels() || 7735 !cpu_has_vmx_ept_mt_wb() || 7736 !cpu_has_vmx_invept_global()) 7737 enable_ept = 0; 7738 7739 /* NX support is required for shadow paging. */ 7740 if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) { 7741 pr_err_ratelimited("kvm: NX (Execute Disable) not supported\n"); 7742 return -EOPNOTSUPP; 7743 } 7744 7745 if (!cpu_has_vmx_ept_ad_bits() || !enable_ept) 7746 enable_ept_ad_bits = 0; 7747 7748 if (!cpu_has_vmx_unrestricted_guest() || !enable_ept) 7749 enable_unrestricted_guest = 0; 7750 7751 if (!cpu_has_vmx_flexpriority()) 7752 flexpriority_enabled = 0; 7753 7754 if (!cpu_has_virtual_nmis()) 7755 enable_vnmi = 0; 7756 7757 /* 7758 * set_apic_access_page_addr() is used to reload apic access 7759 * page upon invalidation. No need to do anything if not 7760 * using the APIC_ACCESS_ADDR VMCS field. 
7761 */ 7762 if (!flexpriority_enabled) 7763 vmx_x86_ops.set_apic_access_page_addr = NULL; 7764 7765 if (!cpu_has_vmx_tpr_shadow()) 7766 vmx_x86_ops.update_cr8_intercept = NULL; 7767 7768 #if IS_ENABLED(CONFIG_HYPERV) 7769 if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH 7770 && enable_ept) { 7771 vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb; 7772 vmx_x86_ops.tlb_remote_flush_with_range = 7773 hv_remote_flush_tlb_with_range; 7774 } 7775 #endif 7776 7777 if (!cpu_has_vmx_ple()) { 7778 ple_gap = 0; 7779 ple_window = 0; 7780 ple_window_grow = 0; 7781 ple_window_max = 0; 7782 ple_window_shrink = 0; 7783 } 7784 7785 if (!cpu_has_vmx_apicv()) 7786 enable_apicv = 0; 7787 if (!enable_apicv) 7788 vmx_x86_ops.sync_pir_to_irr = NULL; 7789 7790 if (cpu_has_vmx_tsc_scaling()) { 7791 kvm_has_tsc_control = true; 7792 kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; 7793 kvm_tsc_scaling_ratio_frac_bits = 48; 7794 } 7795 7796 kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection(); 7797 7798 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ 7799 7800 if (enable_ept) 7801 kvm_mmu_set_ept_masks(enable_ept_ad_bits, 7802 cpu_has_vmx_ept_execute_only()); 7803 7804 if (!enable_ept) 7805 ept_lpage_level = 0; 7806 else if (cpu_has_vmx_ept_1g_page()) 7807 ept_lpage_level = PG_LEVEL_1G; 7808 else if (cpu_has_vmx_ept_2m_page()) 7809 ept_lpage_level = PG_LEVEL_2M; 7810 else 7811 ept_lpage_level = PG_LEVEL_4K; 7812 kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(), 7813 ept_lpage_level); 7814 7815 /* 7816 * Only enable PML when hardware supports PML feature, and both EPT 7817 * and EPT A/D bit features are enabled -- PML depends on them to work. 7818 */ 7819 if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) 7820 enable_pml = 0; 7821 7822 if (!enable_pml) 7823 vmx_x86_ops.cpu_dirty_log_size = 0; 7824 7825 if (!cpu_has_vmx_preemption_timer()) 7826 enable_preemption_timer = false; 7827 7828 if (enable_preemption_timer) { 7829 u64 use_timer_freq = 5000ULL * 1000 * 1000; 7830 u64 vmx_msr; 7831 7832 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); 7833 cpu_preemption_timer_multi = 7834 vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; 7835 7836 if (tsc_khz) 7837 use_timer_freq = (u64)tsc_khz * 1000; 7838 use_timer_freq >>= cpu_preemption_timer_multi; 7839 7840 /* 7841 * KVM "disables" the preemption timer by setting it to its max 7842 * value. Don't use the timer if it might cause spurious exits 7843 * at a rate faster than 0.1 Hz (of uninterrupted guest time). 
7844 */ 7845 if (use_timer_freq > 0xffffffffu / 10) 7846 enable_preemption_timer = false; 7847 } 7848 7849 if (!enable_preemption_timer) { 7850 vmx_x86_ops.set_hv_timer = NULL; 7851 vmx_x86_ops.cancel_hv_timer = NULL; 7852 vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit; 7853 } 7854 7855 kvm_mce_cap_supported |= MCG_LMCE_P; 7856 7857 if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST) 7858 return -EINVAL; 7859 if (!enable_ept || !cpu_has_vmx_intel_pt()) 7860 pt_mode = PT_MODE_SYSTEM; 7861 7862 setup_default_sgx_lepubkeyhash(); 7863 7864 if (nested) { 7865 nested_vmx_setup_ctls_msrs(&vmcs_config.nested, 7866 vmx_capability.ept); 7867 7868 r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers); 7869 if (r) 7870 return r; 7871 } 7872 7873 vmx_set_cpu_caps(); 7874 7875 r = alloc_kvm_area(); 7876 if (r) 7877 nested_vmx_hardware_unsetup(); 7878 7879 kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler); 7880 7881 return r; 7882 } 7883 7884 static struct kvm_x86_init_ops vmx_init_ops __initdata = { 7885 .cpu_has_kvm_support = cpu_has_kvm_support, 7886 .disabled_by_bios = vmx_disabled_by_bios, 7887 .check_processor_compatibility = vmx_check_processor_compat, 7888 .hardware_setup = hardware_setup, 7889 7890 .runtime_ops = &vmx_x86_ops, 7891 }; 7892 7893 static void vmx_cleanup_l1d_flush(void) 7894 { 7895 if (vmx_l1d_flush_pages) { 7896 free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); 7897 vmx_l1d_flush_pages = NULL; 7898 } 7899 /* Restore state so sysfs ignores VMX */ 7900 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 7901 } 7902 7903 static void vmx_exit(void) 7904 { 7905 #ifdef CONFIG_KEXEC_CORE 7906 RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); 7907 synchronize_rcu(); 7908 #endif 7909 7910 kvm_exit(); 7911 7912 #if IS_ENABLED(CONFIG_HYPERV) 7913 if (static_branch_unlikely(&enable_evmcs)) { 7914 int cpu; 7915 struct hv_vp_assist_page *vp_ap; 7916 /* 7917 * Reset everything to support using non-enlightened VMCS 7918 * access later (e.g. when we reload the module with 7919 * enlightened_vmcs=0) 7920 */ 7921 for_each_online_cpu(cpu) { 7922 vp_ap = hv_get_vp_assist_page(cpu); 7923 7924 if (!vp_ap) 7925 continue; 7926 7927 vp_ap->nested_control.features.directhypercall = 0; 7928 vp_ap->current_nested_vmcs = 0; 7929 vp_ap->enlighten_vmentry = 0; 7930 } 7931 7932 static_branch_disable(&enable_evmcs); 7933 } 7934 #endif 7935 vmx_cleanup_l1d_flush(); 7936 7937 allow_smaller_maxphyaddr = false; 7938 } 7939 module_exit(vmx_exit); 7940 7941 static int __init vmx_init(void) 7942 { 7943 int r, cpu; 7944 7945 #if IS_ENABLED(CONFIG_HYPERV) 7946 /* 7947 * Enlightened VMCS usage should be recommended and the host needs 7948 * to support eVMCS v1 or above. We can also disable eVMCS support 7949 * with module parameter. 
7950 */ 7951 if (enlightened_vmcs && 7952 ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED && 7953 (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >= 7954 KVM_EVMCS_VERSION) { 7955 int cpu; 7956 7957 /* Check that we have assist pages on all online CPUs */ 7958 for_each_online_cpu(cpu) { 7959 if (!hv_get_vp_assist_page(cpu)) { 7960 enlightened_vmcs = false; 7961 break; 7962 } 7963 } 7964 7965 if (enlightened_vmcs) { 7966 pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n"); 7967 static_branch_enable(&enable_evmcs); 7968 } 7969 7970 if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) 7971 vmx_x86_ops.enable_direct_tlbflush 7972 = hv_enable_direct_tlbflush; 7973 7974 } else { 7975 enlightened_vmcs = false; 7976 } 7977 #endif 7978 7979 r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx), 7980 __alignof__(struct vcpu_vmx), THIS_MODULE); 7981 if (r) 7982 return r; 7983 7984 /* 7985 * Must be called after kvm_init() so enable_ept is properly set 7986 * up. Hand the parameter mitigation value in which was stored in 7987 * the pre module init parser. If no parameter was given, it will 7988 * contain 'auto' which will be turned into the default 'cond' 7989 * mitigation mode. 7990 */ 7991 r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); 7992 if (r) { 7993 vmx_exit(); 7994 return r; 7995 } 7996 7997 for_each_possible_cpu(cpu) { 7998 INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); 7999 8000 pi_init_cpu(cpu); 8001 } 8002 8003 #ifdef CONFIG_KEXEC_CORE 8004 rcu_assign_pointer(crash_vmclear_loaded_vmcss, 8005 crash_vmclear_local_loaded_vmcss); 8006 #endif 8007 vmx_check_vmcs12_offsets(); 8008 8009 /* 8010 * Shadow paging doesn't have a (further) performance penalty 8011 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it 8012 * by default 8013 */ 8014 if (!enable_ept) 8015 allow_smaller_maxphyaddr = true; 8016 8017 return 0; 8018 } 8019 module_init(vmx_init); 8020