1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * 5 * This module enables machines with Intel VT-x extensions to run virtual 6 * machines without emulation or binary translation. 7 * 8 * Copyright (C) 2006 Qumranet, Inc. 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 10 * 11 * Authors: 12 * Avi Kivity <avi@qumranet.com> 13 * Yaniv Kamay <yaniv@qumranet.com> 14 */ 15 16 #include <linux/frame.h> 17 #include <linux/highmem.h> 18 #include <linux/hrtimer.h> 19 #include <linux/kernel.h> 20 #include <linux/kvm_host.h> 21 #include <linux/module.h> 22 #include <linux/moduleparam.h> 23 #include <linux/mod_devicetable.h> 24 #include <linux/mm.h> 25 #include <linux/sched.h> 26 #include <linux/sched/smt.h> 27 #include <linux/slab.h> 28 #include <linux/tboot.h> 29 #include <linux/trace_events.h> 30 31 #include <asm/apic.h> 32 #include <asm/asm.h> 33 #include <asm/cpu.h> 34 #include <asm/debugreg.h> 35 #include <asm/desc.h> 36 #include <asm/fpu/internal.h> 37 #include <asm/io.h> 38 #include <asm/irq_remapping.h> 39 #include <asm/kexec.h> 40 #include <asm/perf_event.h> 41 #include <asm/mce.h> 42 #include <asm/mmu_context.h> 43 #include <asm/mshyperv.h> 44 #include <asm/spec-ctrl.h> 45 #include <asm/virtext.h> 46 #include <asm/vmx.h> 47 48 #include "capabilities.h" 49 #include "cpuid.h" 50 #include "evmcs.h" 51 #include "irq.h" 52 #include "kvm_cache_regs.h" 53 #include "lapic.h" 54 #include "mmu.h" 55 #include "nested.h" 56 #include "ops.h" 57 #include "pmu.h" 58 #include "trace.h" 59 #include "vmcs.h" 60 #include "vmcs12.h" 61 #include "vmx.h" 62 #include "x86.h" 63 64 MODULE_AUTHOR("Qumranet"); 65 MODULE_LICENSE("GPL"); 66 67 static const struct x86_cpu_id vmx_cpu_id[] = { 68 X86_FEATURE_MATCH(X86_FEATURE_VMX), 69 {} 70 }; 71 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); 72 73 bool __read_mostly enable_vpid = 1; 74 module_param_named(vpid, enable_vpid, bool, 0444); 75 76 static bool __read_mostly enable_vnmi = 1; 77 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO); 78 79 bool __read_mostly flexpriority_enabled = 1; 80 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); 81 82 bool __read_mostly enable_ept = 1; 83 module_param_named(ept, enable_ept, bool, S_IRUGO); 84 85 bool __read_mostly enable_unrestricted_guest = 1; 86 module_param_named(unrestricted_guest, 87 enable_unrestricted_guest, bool, S_IRUGO); 88 89 bool __read_mostly enable_ept_ad_bits = 1; 90 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); 91 92 static bool __read_mostly emulate_invalid_guest_state = true; 93 module_param(emulate_invalid_guest_state, bool, S_IRUGO); 94 95 static bool __read_mostly fasteoi = 1; 96 module_param(fasteoi, bool, S_IRUGO); 97 98 static bool __read_mostly enable_apicv = 1; 99 module_param(enable_apicv, bool, S_IRUGO); 100 101 /* 102 * If nested=1, nested virtualization is supported, i.e., guests may use 103 * VMX and be a hypervisor for its own guests. If nested=0, guests may not 104 * use VMX instructions. 105 */ 106 static bool __read_mostly nested = 1; 107 module_param(nested, bool, S_IRUGO); 108 109 bool __read_mostly enable_pml = 1; 110 module_param_named(pml, enable_pml, bool, S_IRUGO); 111 112 static bool __read_mostly dump_invalid_vmcs = 0; 113 module_param(dump_invalid_vmcs, bool, 0644); 114 115 #define MSR_BITMAP_MODE_X2APIC 1 116 #define MSR_BITMAP_MODE_X2APIC_APICV 2 117 118 #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL 119 120 /* Guest_tsc -> host_tsc conversion requires 64-bit division. 
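 * cpu_preemption_timer_multi is the TSC-to-preemption-timer rate ratio
 * advertised by the CPU in MSR_IA32_VMX_MISC; note that the module
 * parameter below is only exposed on 64-bit builds (CONFIG_X86_64).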
*/ 121 static int __read_mostly cpu_preemption_timer_multi; 122 static bool __read_mostly enable_preemption_timer = 1; 123 #ifdef CONFIG_X86_64 124 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); 125 #endif 126 127 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD) 128 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE 129 #define KVM_VM_CR0_ALWAYS_ON \ 130 (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \ 131 X86_CR0_WP | X86_CR0_PG | X86_CR0_PE) 132 #define KVM_CR4_GUEST_OWNED_BITS \ 133 (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ 134 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD) 135 136 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE 137 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) 138 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) 139 140 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) 141 142 #define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \ 143 RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \ 144 RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \ 145 RTIT_STATUS_BYTECNT)) 146 147 #define MSR_IA32_RTIT_OUTPUT_BASE_MASK \ 148 (~((1UL << cpuid_query_maxphyaddr(vcpu)) - 1) | 0x7f) 149 150 /* 151 * These 2 parameters are used to config the controls for Pause-Loop Exiting: 152 * ple_gap: upper bound on the amount of time between two successive 153 * executions of PAUSE in a loop. Also indicate if ple enabled. 154 * According to test, this time is usually smaller than 128 cycles. 155 * ple_window: upper bound on the amount of time a guest is allowed to execute 156 * in a PAUSE loop. Tests indicate that most spinlocks are held for 157 * less than 2^12 cycles 158 * Time is measured based on a counter that runs at the same rate as the TSC, 159 * refer SDM volume 3b section 21.6.13 & 22.1.3. 160 */ 161 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP; 162 module_param(ple_gap, uint, 0444); 163 164 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; 165 module_param(ple_window, uint, 0444); 166 167 /* Default doubles per-vcpu window every exit. */ 168 static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW; 169 module_param(ple_window_grow, uint, 0444); 170 171 /* Default resets per-vcpu window every exit to ple_window. */ 172 static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK; 173 module_param(ple_window_shrink, uint, 0444); 174 175 /* Default is to compute the maximum so we can never overflow. 
*/ 176 static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; 177 module_param(ple_window_max, uint, 0444); 178 179 /* Default is SYSTEM mode, 1 for host-guest mode */ 180 int __read_mostly pt_mode = PT_MODE_SYSTEM; 181 module_param(pt_mode, int, S_IRUGO); 182 183 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); 184 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); 185 static DEFINE_MUTEX(vmx_l1d_flush_mutex); 186 187 /* Storage for pre module init parameter parsing */ 188 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO; 189 190 static const struct { 191 const char *option; 192 bool for_parse; 193 } vmentry_l1d_param[] = { 194 [VMENTER_L1D_FLUSH_AUTO] = {"auto", true}, 195 [VMENTER_L1D_FLUSH_NEVER] = {"never", true}, 196 [VMENTER_L1D_FLUSH_COND] = {"cond", true}, 197 [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true}, 198 [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false}, 199 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false}, 200 }; 201 202 #define L1D_CACHE_ORDER 4 203 static void *vmx_l1d_flush_pages; 204 205 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) 206 { 207 struct page *page; 208 unsigned int i; 209 210 if (!boot_cpu_has_bug(X86_BUG_L1TF)) { 211 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; 212 return 0; 213 } 214 215 if (!enable_ept) { 216 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED; 217 return 0; 218 } 219 220 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { 221 u64 msr; 222 223 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); 224 if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) { 225 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; 226 return 0; 227 } 228 } 229 230 /* If set to auto use the default l1tf mitigation method */ 231 if (l1tf == VMENTER_L1D_FLUSH_AUTO) { 232 switch (l1tf_mitigation) { 233 case L1TF_MITIGATION_OFF: 234 l1tf = VMENTER_L1D_FLUSH_NEVER; 235 break; 236 case L1TF_MITIGATION_FLUSH_NOWARN: 237 case L1TF_MITIGATION_FLUSH: 238 case L1TF_MITIGATION_FLUSH_NOSMT: 239 l1tf = VMENTER_L1D_FLUSH_COND; 240 break; 241 case L1TF_MITIGATION_FULL: 242 case L1TF_MITIGATION_FULL_FORCE: 243 l1tf = VMENTER_L1D_FLUSH_ALWAYS; 244 break; 245 } 246 } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) { 247 l1tf = VMENTER_L1D_FLUSH_ALWAYS; 248 } 249 250 if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages && 251 !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) { 252 /* 253 * This allocation for vmx_l1d_flush_pages is not tied to a VM 254 * lifetime and so should not be charged to a memcg. 255 */ 256 page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER); 257 if (!page) 258 return -ENOMEM; 259 vmx_l1d_flush_pages = page_address(page); 260 261 /* 262 * Initialize each page with a different pattern in 263 * order to protect against KSM in the nested 264 * virtualization case. 
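		 * (KSM could otherwise merge identical pages into a single
		 * physical page, and reading that one page repeatedly would
		 * no longer displace the whole L1D.)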
265 */ 266 for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) { 267 memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1, 268 PAGE_SIZE); 269 } 270 } 271 272 l1tf_vmx_mitigation = l1tf; 273 274 if (l1tf != VMENTER_L1D_FLUSH_NEVER) 275 static_branch_enable(&vmx_l1d_should_flush); 276 else 277 static_branch_disable(&vmx_l1d_should_flush); 278 279 if (l1tf == VMENTER_L1D_FLUSH_COND) 280 static_branch_enable(&vmx_l1d_flush_cond); 281 else 282 static_branch_disable(&vmx_l1d_flush_cond); 283 return 0; 284 } 285 286 static int vmentry_l1d_flush_parse(const char *s) 287 { 288 unsigned int i; 289 290 if (s) { 291 for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) { 292 if (vmentry_l1d_param[i].for_parse && 293 sysfs_streq(s, vmentry_l1d_param[i].option)) 294 return i; 295 } 296 } 297 return -EINVAL; 298 } 299 300 static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) 301 { 302 int l1tf, ret; 303 304 l1tf = vmentry_l1d_flush_parse(s); 305 if (l1tf < 0) 306 return l1tf; 307 308 if (!boot_cpu_has(X86_BUG_L1TF)) 309 return 0; 310 311 /* 312 * Has vmx_init() run already? If not then this is the pre init 313 * parameter parsing. In that case just store the value and let 314 * vmx_init() do the proper setup after enable_ept has been 315 * established. 316 */ 317 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) { 318 vmentry_l1d_flush_param = l1tf; 319 return 0; 320 } 321 322 mutex_lock(&vmx_l1d_flush_mutex); 323 ret = vmx_setup_l1d_flush(l1tf); 324 mutex_unlock(&vmx_l1d_flush_mutex); 325 return ret; 326 } 327 328 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) 329 { 330 if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param))) 331 return sprintf(s, "???\n"); 332 333 return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); 334 } 335 336 static const struct kernel_param_ops vmentry_l1d_flush_ops = { 337 .set = vmentry_l1d_flush_set, 338 .get = vmentry_l1d_flush_get, 339 }; 340 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); 341 342 static bool guest_state_valid(struct kvm_vcpu *vcpu); 343 static u32 vmx_segment_access_rights(struct kvm_segment *var); 344 static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, 345 u32 msr, int type); 346 347 void vmx_vmexit(void); 348 349 #define vmx_insn_failed(fmt...) 
\
	do {							\
		WARN_ONCE(1, fmt);				\
		pr_warn_ratelimited(fmt);			\
	} while (0)

asmlinkage void vmread_error(unsigned long field, bool fault)
{
	if (fault)
		kvm_spurious_fault();
	else
		vmx_insn_failed("kvm: vmread failed: field=%lx\n", field);
}

noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",
			field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
}

noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr);
}

noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr);
}

noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
{
	vmx_insn_failed("kvm: invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
			ext, vpid, gva);
}

noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
{
	vmx_insn_failed("kvm: invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n",
			ext, eptp, gpa);
}

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);

/*
 * We maintain a per-CPU linked-list of vCPUs, so in wakeup_handler() we
 * can find which vCPU should be woken up.
 */
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

struct vmcs_config vmcs_config;
struct vmx_capability vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

u64 host_efer;
static unsigned long host_idt_base;

/*
 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
 * will emulate SYSCALL in legacy mode if the vendor string in guest
 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
 * support this emulation, IA32_STAR must always be included in
 * vmx_msr_index[], even in i386 builds.
 */
const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
	MSR_IA32_TSX_CTRL,
};

#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);

/* check_ept_pointer() should be under protection of ept_pointer_lock.
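 * The caller, hv_remote_flush_tlb_with_range(), takes that spinlock
 * before invoking it.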
 */
static void check_ept_pointer_match(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	u64 tmp_eptp = INVALID_PAGE;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!VALID_PAGE(tmp_eptp)) {
			tmp_eptp = to_vmx(vcpu)->ept_pointer;
		} else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
			to_kvm_vmx(kvm)->ept_pointers_match
				= EPT_POINTERS_MISMATCH;
			return;
		}
	}

	to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
}

static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
		void *data)
{
	struct kvm_tlb_range *range = data;

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
			range->pages);
}

static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
		struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
{
	u64 ept_pointer = to_vmx(vcpu)->ept_pointer;

	/*
	 * The FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address
	 * of the base of the EPT PML4 table, so strip off the EPT
	 * configuration information.
	 */
	if (range)
		return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK,
				kvm_fill_hv_flush_list_func, (void *)range);
	else
		return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK);
}

static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	struct kvm_vcpu *vcpu;
	int ret = 0, i;

	spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);

	if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
		check_ept_pointer_match(kvm);

	if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			/* If the ept_pointer is invalid, bypass the flush request. */
			if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
				ret |= __hv_remote_flush_tlb_with_range(
					kvm, vcpu, range);
		}
	} else {
		ret = __hv_remote_flush_tlb_with_range(kvm,
				kvm_get_vcpu(kvm, 0), range);
	}

	spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
	return ret;
}

static int hv_remote_flush_tlb(struct kvm *kvm)
{
	return hv_remote_flush_tlb_with_range(kvm, NULL);
}

static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
	struct hv_enlightened_vmcs *evmcs;
	struct hv_partition_assist_pg **p_hv_pa_pg =
			&vcpu->kvm->arch.hyperv.hv_pa_pg;
	/*
	 * Synthetic VM-exit is not enabled in current code, so all the
	 * evmcs in a single VM share the same assist page.
	 */
	if (!*p_hv_pa_pg)
		*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);

	if (!*p_hv_pa_pg)
		return -ENOMEM;

	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;

	evmcs->partition_assist_page =
		__pa(*p_hv_pa_pg);
	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;

	return 0;
}

#endif /* IS_ENABLED(CONFIG_HYPERV) */

/*
 * Comment's format: document - errata name - stepping - processor name.
564 * Refer from 565 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp 566 */ 567 static u32 vmx_preemption_cpu_tfms[] = { 568 /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */ 569 0x000206E6, 570 /* 323056.pdf - AAX65 - C2 - Xeon L3406 */ 571 /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */ 572 /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */ 573 0x00020652, 574 /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */ 575 0x00020655, 576 /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */ 577 /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */ 578 /* 579 * 320767.pdf - AAP86 - B1 - 580 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile 581 */ 582 0x000106E5, 583 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */ 584 0x000106A0, 585 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */ 586 0x000106A1, 587 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */ 588 0x000106A4, 589 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */ 590 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */ 591 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */ 592 0x000106A5, 593 /* Xeon E3-1220 V2 */ 594 0x000306A8, 595 }; 596 597 static inline bool cpu_has_broken_vmx_preemption_timer(void) 598 { 599 u32 eax = cpuid_eax(0x00000001), i; 600 601 /* Clear the reserved bits */ 602 eax &= ~(0x3U << 14 | 0xfU << 28); 603 for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++) 604 if (eax == vmx_preemption_cpu_tfms[i]) 605 return true; 606 607 return false; 608 } 609 610 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu) 611 { 612 return flexpriority_enabled && lapic_in_kernel(vcpu); 613 } 614 615 static inline bool report_flexpriority(void) 616 { 617 return flexpriority_enabled; 618 } 619 620 static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) 621 { 622 int i; 623 624 for (i = 0; i < vmx->nmsrs; ++i) 625 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) 626 return i; 627 return -1; 628 } 629 630 struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) 631 { 632 int i; 633 634 i = __find_msr_index(vmx, msr); 635 if (i >= 0) 636 return &vmx->guest_msrs[i]; 637 return NULL; 638 } 639 640 static int vmx_set_guest_msr(struct vcpu_vmx *vmx, struct shared_msr_entry *msr, u64 data) 641 { 642 int ret = 0; 643 644 u64 old_msr_data = msr->data; 645 msr->data = data; 646 if (msr - vmx->guest_msrs < vmx->save_nmsrs) { 647 preempt_disable(); 648 ret = kvm_set_shared_msr(msr->index, msr->data, 649 msr->mask); 650 preempt_enable(); 651 if (ret) 652 msr->data = old_msr_data; 653 } 654 return ret; 655 } 656 657 void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) 658 { 659 vmcs_clear(loaded_vmcs->vmcs); 660 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) 661 vmcs_clear(loaded_vmcs->shadow_vmcs); 662 loaded_vmcs->cpu = -1; 663 loaded_vmcs->launched = 0; 664 } 665 666 #ifdef CONFIG_KEXEC_CORE 667 /* 668 * This bitmap is used to indicate whether the vmclear 669 * operation is enabled on all cpus. All disabled by 670 * default. 
671 */ 672 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE; 673 674 static inline void crash_enable_local_vmclear(int cpu) 675 { 676 cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap); 677 } 678 679 static inline void crash_disable_local_vmclear(int cpu) 680 { 681 cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap); 682 } 683 684 static inline int crash_local_vmclear_enabled(int cpu) 685 { 686 return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap); 687 } 688 689 static void crash_vmclear_local_loaded_vmcss(void) 690 { 691 int cpu = raw_smp_processor_id(); 692 struct loaded_vmcs *v; 693 694 if (!crash_local_vmclear_enabled(cpu)) 695 return; 696 697 list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), 698 loaded_vmcss_on_cpu_link) 699 vmcs_clear(v->vmcs); 700 } 701 #else 702 static inline void crash_enable_local_vmclear(int cpu) { } 703 static inline void crash_disable_local_vmclear(int cpu) { } 704 #endif /* CONFIG_KEXEC_CORE */ 705 706 static void __loaded_vmcs_clear(void *arg) 707 { 708 struct loaded_vmcs *loaded_vmcs = arg; 709 int cpu = raw_smp_processor_id(); 710 711 if (loaded_vmcs->cpu != cpu) 712 return; /* vcpu migration can race with cpu offline */ 713 if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) 714 per_cpu(current_vmcs, cpu) = NULL; 715 crash_disable_local_vmclear(cpu); 716 list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); 717 718 /* 719 * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link 720 * is before setting loaded_vmcs->vcpu to -1 which is done in 721 * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist 722 * then adds the vmcs into percpu list before it is deleted. 723 */ 724 smp_wmb(); 725 726 loaded_vmcs_init(loaded_vmcs); 727 crash_enable_local_vmclear(cpu); 728 } 729 730 void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) 731 { 732 int cpu = loaded_vmcs->cpu; 733 734 if (cpu != -1) 735 smp_call_function_single(cpu, 736 __loaded_vmcs_clear, loaded_vmcs, 1); 737 } 738 739 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, 740 unsigned field) 741 { 742 bool ret; 743 u32 mask = 1 << (seg * SEG_FIELD_NR + field); 744 745 if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) { 746 kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS); 747 vmx->segment_cache.bitmask = 0; 748 } 749 ret = vmx->segment_cache.bitmask & mask; 750 vmx->segment_cache.bitmask |= mask; 751 return ret; 752 } 753 754 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) 755 { 756 u16 *p = &vmx->segment_cache.seg[seg].selector; 757 758 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) 759 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); 760 return *p; 761 } 762 763 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) 764 { 765 ulong *p = &vmx->segment_cache.seg[seg].base; 766 767 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) 768 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); 769 return *p; 770 } 771 772 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) 773 { 774 u32 *p = &vmx->segment_cache.seg[seg].limit; 775 776 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) 777 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); 778 return *p; 779 } 780 781 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) 782 { 783 u32 *p = &vmx->segment_cache.seg[seg].ar; 784 785 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) 786 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); 787 
return *p; 788 } 789 790 void update_exception_bitmap(struct kvm_vcpu *vcpu) 791 { 792 u32 eb; 793 794 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | 795 (1u << DB_VECTOR) | (1u << AC_VECTOR); 796 /* 797 * Guest access to VMware backdoor ports could legitimately 798 * trigger #GP because of TSS I/O permission bitmap. 799 * We intercept those #GP and allow access to them anyway 800 * as VMware does. 801 */ 802 if (enable_vmware_backdoor) 803 eb |= (1u << GP_VECTOR); 804 if ((vcpu->guest_debug & 805 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == 806 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) 807 eb |= 1u << BP_VECTOR; 808 if (to_vmx(vcpu)->rmode.vm86_active) 809 eb = ~0; 810 if (enable_ept) 811 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ 812 813 /* When we are running a nested L2 guest and L1 specified for it a 814 * certain exception bitmap, we must trap the same exceptions and pass 815 * them to L1. When running L2, we will only handle the exceptions 816 * specified above if L1 did not want them. 817 */ 818 if (is_guest_mode(vcpu)) 819 eb |= get_vmcs12(vcpu)->exception_bitmap; 820 821 vmcs_write32(EXCEPTION_BITMAP, eb); 822 } 823 824 /* 825 * Check if MSR is intercepted for currently loaded MSR bitmap. 826 */ 827 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) 828 { 829 unsigned long *msr_bitmap; 830 int f = sizeof(unsigned long); 831 832 if (!cpu_has_vmx_msr_bitmap()) 833 return true; 834 835 msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap; 836 837 if (msr <= 0x1fff) { 838 return !!test_bit(msr, msr_bitmap + 0x800 / f); 839 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 840 msr &= 0x1fff; 841 return !!test_bit(msr, msr_bitmap + 0xc00 / f); 842 } 843 844 return true; 845 } 846 847 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, 848 unsigned long entry, unsigned long exit) 849 { 850 vm_entry_controls_clearbit(vmx, entry); 851 vm_exit_controls_clearbit(vmx, exit); 852 } 853 854 int vmx_find_msr_index(struct vmx_msrs *m, u32 msr) 855 { 856 unsigned int i; 857 858 for (i = 0; i < m->nr; ++i) { 859 if (m->val[i].index == msr) 860 return i; 861 } 862 return -ENOENT; 863 } 864 865 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) 866 { 867 int i; 868 struct msr_autoload *m = &vmx->msr_autoload; 869 870 switch (msr) { 871 case MSR_EFER: 872 if (cpu_has_load_ia32_efer()) { 873 clear_atomic_switch_msr_special(vmx, 874 VM_ENTRY_LOAD_IA32_EFER, 875 VM_EXIT_LOAD_IA32_EFER); 876 return; 877 } 878 break; 879 case MSR_CORE_PERF_GLOBAL_CTRL: 880 if (cpu_has_load_perf_global_ctrl()) { 881 clear_atomic_switch_msr_special(vmx, 882 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 883 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); 884 return; 885 } 886 break; 887 } 888 i = vmx_find_msr_index(&m->guest, msr); 889 if (i < 0) 890 goto skip_guest; 891 --m->guest.nr; 892 m->guest.val[i] = m->guest.val[m->guest.nr]; 893 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); 894 895 skip_guest: 896 i = vmx_find_msr_index(&m->host, msr); 897 if (i < 0) 898 return; 899 900 --m->host.nr; 901 m->host.val[i] = m->host.val[m->host.nr]; 902 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); 903 } 904 905 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, 906 unsigned long entry, unsigned long exit, 907 unsigned long guest_val_vmcs, unsigned long host_val_vmcs, 908 u64 guest_val, u64 host_val) 909 { 910 vmcs_write64(guest_val_vmcs, guest_val); 911 if (host_val_vmcs != HOST_IA32_EFER) 912 vmcs_write64(host_val_vmcs, host_val); 913 
vm_entry_controls_setbit(vmx, entry); 914 vm_exit_controls_setbit(vmx, exit); 915 } 916 917 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, 918 u64 guest_val, u64 host_val, bool entry_only) 919 { 920 int i, j = 0; 921 struct msr_autoload *m = &vmx->msr_autoload; 922 923 switch (msr) { 924 case MSR_EFER: 925 if (cpu_has_load_ia32_efer()) { 926 add_atomic_switch_msr_special(vmx, 927 VM_ENTRY_LOAD_IA32_EFER, 928 VM_EXIT_LOAD_IA32_EFER, 929 GUEST_IA32_EFER, 930 HOST_IA32_EFER, 931 guest_val, host_val); 932 return; 933 } 934 break; 935 case MSR_CORE_PERF_GLOBAL_CTRL: 936 if (cpu_has_load_perf_global_ctrl()) { 937 add_atomic_switch_msr_special(vmx, 938 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 939 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 940 GUEST_IA32_PERF_GLOBAL_CTRL, 941 HOST_IA32_PERF_GLOBAL_CTRL, 942 guest_val, host_val); 943 return; 944 } 945 break; 946 case MSR_IA32_PEBS_ENABLE: 947 /* PEBS needs a quiescent period after being disabled (to write 948 * a record). Disabling PEBS through VMX MSR swapping doesn't 949 * provide that period, so a CPU could write host's record into 950 * guest's memory. 951 */ 952 wrmsrl(MSR_IA32_PEBS_ENABLE, 0); 953 } 954 955 i = vmx_find_msr_index(&m->guest, msr); 956 if (!entry_only) 957 j = vmx_find_msr_index(&m->host, msr); 958 959 if ((i < 0 && m->guest.nr == NR_LOADSTORE_MSRS) || 960 (j < 0 && m->host.nr == NR_LOADSTORE_MSRS)) { 961 printk_once(KERN_WARNING "Not enough msr switch entries. " 962 "Can't add msr %x\n", msr); 963 return; 964 } 965 if (i < 0) { 966 i = m->guest.nr++; 967 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); 968 } 969 m->guest.val[i].index = msr; 970 m->guest.val[i].value = guest_val; 971 972 if (entry_only) 973 return; 974 975 if (j < 0) { 976 j = m->host.nr++; 977 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); 978 } 979 m->host.val[j].index = msr; 980 m->host.val[j].value = host_val; 981 } 982 983 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) 984 { 985 u64 guest_efer = vmx->vcpu.arch.efer; 986 u64 ignore_bits = 0; 987 988 /* Shadow paging assumes NX to be available. */ 989 if (!enable_ept) 990 guest_efer |= EFER_NX; 991 992 /* 993 * LMA and LME handled by hardware; SCE meaningless outside long mode. 994 */ 995 ignore_bits |= EFER_SCE; 996 #ifdef CONFIG_X86_64 997 ignore_bits |= EFER_LMA | EFER_LME; 998 /* SCE is meaningful only in long mode on Intel */ 999 if (guest_efer & EFER_LMA) 1000 ignore_bits &= ~(u64)EFER_SCE; 1001 #endif 1002 1003 /* 1004 * On EPT, we can't emulate NX, so we must switch EFER atomically. 1005 * On CPUs that support "load IA32_EFER", always switch EFER 1006 * atomically, since it's faster than switching it manually. 1007 */ 1008 if (cpu_has_load_ia32_efer() || 1009 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { 1010 if (!(guest_efer & EFER_LMA)) 1011 guest_efer &= ~EFER_LME; 1012 if (guest_efer != host_efer) 1013 add_atomic_switch_msr(vmx, MSR_EFER, 1014 guest_efer, host_efer, false); 1015 else 1016 clear_atomic_switch_msr(vmx, MSR_EFER); 1017 return false; 1018 } else { 1019 clear_atomic_switch_msr(vmx, MSR_EFER); 1020 1021 guest_efer &= ~ignore_bits; 1022 guest_efer |= host_efer & ignore_bits; 1023 1024 vmx->guest_msrs[efer_offset].data = guest_efer; 1025 vmx->guest_msrs[efer_offset].mask = ~ignore_bits; 1026 1027 return true; 1028 } 1029 } 1030 1031 #ifdef CONFIG_X86_32 1032 /* 1033 * On 32-bit kernels, VM exits still load the FS and GS bases from the 1034 * VMCS rather than the segment table. 
KVM uses this helper to figure 1035 * out the current bases to poke them into the VMCS before entry. 1036 */ 1037 static unsigned long segment_base(u16 selector) 1038 { 1039 struct desc_struct *table; 1040 unsigned long v; 1041 1042 if (!(selector & ~SEGMENT_RPL_MASK)) 1043 return 0; 1044 1045 table = get_current_gdt_ro(); 1046 1047 if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) { 1048 u16 ldt_selector = kvm_read_ldt(); 1049 1050 if (!(ldt_selector & ~SEGMENT_RPL_MASK)) 1051 return 0; 1052 1053 table = (struct desc_struct *)segment_base(ldt_selector); 1054 } 1055 v = get_desc_base(&table[selector >> 3]); 1056 return v; 1057 } 1058 #endif 1059 1060 static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range) 1061 { 1062 u32 i; 1063 1064 wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status); 1065 wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); 1066 wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); 1067 wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); 1068 for (i = 0; i < addr_range; i++) { 1069 wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); 1070 wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); 1071 } 1072 } 1073 1074 static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range) 1075 { 1076 u32 i; 1077 1078 rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status); 1079 rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); 1080 rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); 1081 rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); 1082 for (i = 0; i < addr_range; i++) { 1083 rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); 1084 rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); 1085 } 1086 } 1087 1088 static void pt_guest_enter(struct vcpu_vmx *vmx) 1089 { 1090 if (pt_mode == PT_MODE_SYSTEM) 1091 return; 1092 1093 /* 1094 * GUEST_IA32_RTIT_CTL is already set in the VMCS. 1095 * Save host state before VM entry. 1096 */ 1097 rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); 1098 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { 1099 wrmsrl(MSR_IA32_RTIT_CTL, 0); 1100 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); 1101 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); 1102 } 1103 } 1104 1105 static void pt_guest_exit(struct vcpu_vmx *vmx) 1106 { 1107 if (pt_mode == PT_MODE_SYSTEM) 1108 return; 1109 1110 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { 1111 pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); 1112 pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); 1113 } 1114 1115 /* Reload host state (IA32_RTIT_CTL will be cleared on VM exit). 
*/ 1116 wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); 1117 } 1118 1119 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, 1120 unsigned long fs_base, unsigned long gs_base) 1121 { 1122 if (unlikely(fs_sel != host->fs_sel)) { 1123 if (!(fs_sel & 7)) 1124 vmcs_write16(HOST_FS_SELECTOR, fs_sel); 1125 else 1126 vmcs_write16(HOST_FS_SELECTOR, 0); 1127 host->fs_sel = fs_sel; 1128 } 1129 if (unlikely(gs_sel != host->gs_sel)) { 1130 if (!(gs_sel & 7)) 1131 vmcs_write16(HOST_GS_SELECTOR, gs_sel); 1132 else 1133 vmcs_write16(HOST_GS_SELECTOR, 0); 1134 host->gs_sel = gs_sel; 1135 } 1136 if (unlikely(fs_base != host->fs_base)) { 1137 vmcs_writel(HOST_FS_BASE, fs_base); 1138 host->fs_base = fs_base; 1139 } 1140 if (unlikely(gs_base != host->gs_base)) { 1141 vmcs_writel(HOST_GS_BASE, gs_base); 1142 host->gs_base = gs_base; 1143 } 1144 } 1145 1146 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) 1147 { 1148 struct vcpu_vmx *vmx = to_vmx(vcpu); 1149 struct vmcs_host_state *host_state; 1150 #ifdef CONFIG_X86_64 1151 int cpu = raw_smp_processor_id(); 1152 #endif 1153 unsigned long fs_base, gs_base; 1154 u16 fs_sel, gs_sel; 1155 int i; 1156 1157 vmx->req_immediate_exit = false; 1158 1159 /* 1160 * Note that guest MSRs to be saved/restored can also be changed 1161 * when guest state is loaded. This happens when guest transitions 1162 * to/from long-mode by setting MSR_EFER.LMA. 1163 */ 1164 if (!vmx->guest_msrs_ready) { 1165 vmx->guest_msrs_ready = true; 1166 for (i = 0; i < vmx->save_nmsrs; ++i) 1167 kvm_set_shared_msr(vmx->guest_msrs[i].index, 1168 vmx->guest_msrs[i].data, 1169 vmx->guest_msrs[i].mask); 1170 1171 } 1172 if (vmx->guest_state_loaded) 1173 return; 1174 1175 host_state = &vmx->loaded_vmcs->host_state; 1176 1177 /* 1178 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not 1179 * allow segment selectors with cpl > 0 or ti == 1. 
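	 * (That is why vmx_set_host_fs_gs() writes a null selector whenever
	 * fs_sel/gs_sel have any of the TI/RPL bits set.)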
1180 */ 1181 host_state->ldt_sel = kvm_read_ldt(); 1182 1183 #ifdef CONFIG_X86_64 1184 savesegment(ds, host_state->ds_sel); 1185 savesegment(es, host_state->es_sel); 1186 1187 gs_base = cpu_kernelmode_gs_base(cpu); 1188 if (likely(is_64bit_mm(current->mm))) { 1189 save_fsgs_for_kvm(); 1190 fs_sel = current->thread.fsindex; 1191 gs_sel = current->thread.gsindex; 1192 fs_base = current->thread.fsbase; 1193 vmx->msr_host_kernel_gs_base = current->thread.gsbase; 1194 } else { 1195 savesegment(fs, fs_sel); 1196 savesegment(gs, gs_sel); 1197 fs_base = read_msr(MSR_FS_BASE); 1198 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); 1199 } 1200 1201 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1202 #else 1203 savesegment(fs, fs_sel); 1204 savesegment(gs, gs_sel); 1205 fs_base = segment_base(fs_sel); 1206 gs_base = segment_base(gs_sel); 1207 #endif 1208 1209 vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base); 1210 vmx->guest_state_loaded = true; 1211 } 1212 1213 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) 1214 { 1215 struct vmcs_host_state *host_state; 1216 1217 if (!vmx->guest_state_loaded) 1218 return; 1219 1220 host_state = &vmx->loaded_vmcs->host_state; 1221 1222 ++vmx->vcpu.stat.host_state_reload; 1223 1224 #ifdef CONFIG_X86_64 1225 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1226 #endif 1227 if (host_state->ldt_sel || (host_state->gs_sel & 7)) { 1228 kvm_load_ldt(host_state->ldt_sel); 1229 #ifdef CONFIG_X86_64 1230 load_gs_index(host_state->gs_sel); 1231 #else 1232 loadsegment(gs, host_state->gs_sel); 1233 #endif 1234 } 1235 if (host_state->fs_sel & 7) 1236 loadsegment(fs, host_state->fs_sel); 1237 #ifdef CONFIG_X86_64 1238 if (unlikely(host_state->ds_sel | host_state->es_sel)) { 1239 loadsegment(ds, host_state->ds_sel); 1240 loadsegment(es, host_state->es_sel); 1241 } 1242 #endif 1243 invalidate_tss_limit(); 1244 #ifdef CONFIG_X86_64 1245 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); 1246 #endif 1247 load_fixmap_gdt(raw_smp_processor_id()); 1248 vmx->guest_state_loaded = false; 1249 vmx->guest_msrs_ready = false; 1250 } 1251 1252 #ifdef CONFIG_X86_64 1253 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx) 1254 { 1255 preempt_disable(); 1256 if (vmx->guest_state_loaded) 1257 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1258 preempt_enable(); 1259 return vmx->msr_guest_kernel_gs_base; 1260 } 1261 1262 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data) 1263 { 1264 preempt_disable(); 1265 if (vmx->guest_state_loaded) 1266 wrmsrl(MSR_KERNEL_GS_BASE, data); 1267 preempt_enable(); 1268 vmx->msr_guest_kernel_gs_base = data; 1269 } 1270 #endif 1271 1272 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) 1273 { 1274 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 1275 struct pi_desc old, new; 1276 unsigned int dest; 1277 1278 /* 1279 * In case of hot-plug or hot-unplug, we may have to undo 1280 * vmx_vcpu_pi_put even if there is no assigned device. And we 1281 * always keep PI.NDST up to date for simplicity: it makes the 1282 * code easier, and CPU migration is not a fast path. 1283 */ 1284 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) 1285 return; 1286 1287 /* 1288 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change 1289 * PI.NDST: pi_post_block is the one expected to change PID.NDST and the 1290 * wakeup handler expects the vCPU to be on the blocked_vcpu_list that 1291 * matches PI.NDST. 
Otherwise, a vcpu may not be able to be woken up 1292 * correctly. 1293 */ 1294 if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) { 1295 pi_clear_sn(pi_desc); 1296 goto after_clear_sn; 1297 } 1298 1299 /* The full case. */ 1300 do { 1301 old.control = new.control = pi_desc->control; 1302 1303 dest = cpu_physical_id(cpu); 1304 1305 if (x2apic_enabled()) 1306 new.ndst = dest; 1307 else 1308 new.ndst = (dest << 8) & 0xFF00; 1309 1310 new.sn = 0; 1311 } while (cmpxchg64(&pi_desc->control, old.control, 1312 new.control) != old.control); 1313 1314 after_clear_sn: 1315 1316 /* 1317 * Clear SN before reading the bitmap. The VT-d firmware 1318 * writes the bitmap and reads SN atomically (5.2.3 in the 1319 * spec), so it doesn't really have a memory barrier that 1320 * pairs with this, but we cannot do that and we need one. 1321 */ 1322 smp_mb__after_atomic(); 1323 1324 if (!pi_is_pir_empty(pi_desc)) 1325 pi_set_on(pi_desc); 1326 } 1327 1328 void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu) 1329 { 1330 struct vcpu_vmx *vmx = to_vmx(vcpu); 1331 bool already_loaded = vmx->loaded_vmcs->cpu == cpu; 1332 1333 if (!already_loaded) { 1334 loaded_vmcs_clear(vmx->loaded_vmcs); 1335 local_irq_disable(); 1336 crash_disable_local_vmclear(cpu); 1337 1338 /* 1339 * Read loaded_vmcs->cpu should be before fetching 1340 * loaded_vmcs->loaded_vmcss_on_cpu_link. 1341 * See the comments in __loaded_vmcs_clear(). 1342 */ 1343 smp_rmb(); 1344 1345 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, 1346 &per_cpu(loaded_vmcss_on_cpu, cpu)); 1347 crash_enable_local_vmclear(cpu); 1348 local_irq_enable(); 1349 } 1350 1351 if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { 1352 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; 1353 vmcs_load(vmx->loaded_vmcs->vmcs); 1354 indirect_branch_prediction_barrier(); 1355 } 1356 1357 if (!already_loaded) { 1358 void *gdt = get_current_gdt_ro(); 1359 unsigned long sysenter_esp; 1360 1361 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1362 1363 /* 1364 * Linux uses per-cpu TSS and GDT, so set these when switching 1365 * processors. See 22.2.4. 1366 */ 1367 vmcs_writel(HOST_TR_BASE, 1368 (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss); 1369 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */ 1370 1371 /* 1372 * VM exits change the host TR limit to 0x67 after a VM 1373 * exit. This is okay, since 0x67 covers everything except 1374 * the IO bitmap and have have code to handle the IO bitmap 1375 * being lost after a VM exit. 1376 */ 1377 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67); 1378 1379 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); 1380 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ 1381 1382 vmx->loaded_vmcs->cpu = cpu; 1383 } 1384 1385 /* Setup TSC multiplier */ 1386 if (kvm_has_tsc_control && 1387 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) 1388 decache_tsc_multiplier(vmx); 1389 } 1390 1391 /* 1392 * Switches to specified vcpu, until a matching vcpu_put(), but assumes 1393 * vcpu mutex is already taken. 
1394 */ 1395 void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1396 { 1397 struct vcpu_vmx *vmx = to_vmx(vcpu); 1398 1399 vmx_vcpu_load_vmcs(vcpu, cpu); 1400 1401 vmx_vcpu_pi_load(vcpu, cpu); 1402 1403 vmx->host_pkru = read_pkru(); 1404 vmx->host_debugctlmsr = get_debugctlmsr(); 1405 } 1406 1407 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu) 1408 { 1409 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 1410 1411 if (!kvm_arch_has_assigned_device(vcpu->kvm) || 1412 !irq_remapping_cap(IRQ_POSTING_CAP) || 1413 !kvm_vcpu_apicv_active(vcpu)) 1414 return; 1415 1416 /* Set SN when the vCPU is preempted */ 1417 if (vcpu->preempted) 1418 pi_set_sn(pi_desc); 1419 } 1420 1421 static void vmx_vcpu_put(struct kvm_vcpu *vcpu) 1422 { 1423 vmx_vcpu_pi_put(vcpu); 1424 1425 vmx_prepare_switch_to_host(to_vmx(vcpu)); 1426 } 1427 1428 static bool emulation_required(struct kvm_vcpu *vcpu) 1429 { 1430 return emulate_invalid_guest_state && !guest_state_valid(vcpu); 1431 } 1432 1433 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); 1434 1435 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) 1436 { 1437 struct vcpu_vmx *vmx = to_vmx(vcpu); 1438 unsigned long rflags, save_rflags; 1439 1440 if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) { 1441 kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS); 1442 rflags = vmcs_readl(GUEST_RFLAGS); 1443 if (vmx->rmode.vm86_active) { 1444 rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; 1445 save_rflags = vmx->rmode.save_rflags; 1446 rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; 1447 } 1448 vmx->rflags = rflags; 1449 } 1450 return vmx->rflags; 1451 } 1452 1453 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 1454 { 1455 struct vcpu_vmx *vmx = to_vmx(vcpu); 1456 unsigned long old_rflags; 1457 1458 if (enable_unrestricted_guest) { 1459 kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS); 1460 vmx->rflags = rflags; 1461 vmcs_writel(GUEST_RFLAGS, rflags); 1462 return; 1463 } 1464 1465 old_rflags = vmx_get_rflags(vcpu); 1466 vmx->rflags = rflags; 1467 if (vmx->rmode.vm86_active) { 1468 vmx->rmode.save_rflags = rflags; 1469 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 1470 } 1471 vmcs_writel(GUEST_RFLAGS, rflags); 1472 1473 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM) 1474 vmx->emulation_required = emulation_required(vcpu); 1475 } 1476 1477 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) 1478 { 1479 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 1480 int ret = 0; 1481 1482 if (interruptibility & GUEST_INTR_STATE_STI) 1483 ret |= KVM_X86_SHADOW_INT_STI; 1484 if (interruptibility & GUEST_INTR_STATE_MOV_SS) 1485 ret |= KVM_X86_SHADOW_INT_MOV_SS; 1486 1487 return ret; 1488 } 1489 1490 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) 1491 { 1492 u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 1493 u32 interruptibility = interruptibility_old; 1494 1495 interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); 1496 1497 if (mask & KVM_X86_SHADOW_INT_MOV_SS) 1498 interruptibility |= GUEST_INTR_STATE_MOV_SS; 1499 else if (mask & KVM_X86_SHADOW_INT_STI) 1500 interruptibility |= GUEST_INTR_STATE_STI; 1501 1502 if ((interruptibility != interruptibility_old)) 1503 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); 1504 } 1505 1506 static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data) 1507 { 1508 struct vcpu_vmx *vmx = to_vmx(vcpu); 1509 unsigned long value; 1510 1511 /* 1512 * Any MSR write that attempts to change bits marked reserved will 1513 * case a #GP fault. 
1514 */ 1515 if (data & vmx->pt_desc.ctl_bitmask) 1516 return 1; 1517 1518 /* 1519 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will 1520 * result in a #GP unless the same write also clears TraceEn. 1521 */ 1522 if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) && 1523 ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN)) 1524 return 1; 1525 1526 /* 1527 * WRMSR to IA32_RTIT_CTL that sets TraceEn but clears this bit 1528 * and FabricEn would cause #GP, if 1529 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0 1530 */ 1531 if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) && 1532 !(data & RTIT_CTL_FABRIC_EN) && 1533 !intel_pt_validate_cap(vmx->pt_desc.caps, 1534 PT_CAP_single_range_output)) 1535 return 1; 1536 1537 /* 1538 * MTCFreq, CycThresh and PSBFreq encodings check, any MSR write that 1539 * utilize encodings marked reserved will casue a #GP fault. 1540 */ 1541 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods); 1542 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) && 1543 !test_bit((data & RTIT_CTL_MTC_RANGE) >> 1544 RTIT_CTL_MTC_RANGE_OFFSET, &value)) 1545 return 1; 1546 value = intel_pt_validate_cap(vmx->pt_desc.caps, 1547 PT_CAP_cycle_thresholds); 1548 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && 1549 !test_bit((data & RTIT_CTL_CYC_THRESH) >> 1550 RTIT_CTL_CYC_THRESH_OFFSET, &value)) 1551 return 1; 1552 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods); 1553 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && 1554 !test_bit((data & RTIT_CTL_PSB_FREQ) >> 1555 RTIT_CTL_PSB_FREQ_OFFSET, &value)) 1556 return 1; 1557 1558 /* 1559 * If ADDRx_CFG is reserved or the encodings is >2 will 1560 * cause a #GP fault. 1561 */ 1562 value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET; 1563 if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2)) 1564 return 1; 1565 value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET; 1566 if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2)) 1567 return 1; 1568 value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET; 1569 if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2)) 1570 return 1; 1571 value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET; 1572 if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2)) 1573 return 1; 1574 1575 return 0; 1576 } 1577 1578 static int skip_emulated_instruction(struct kvm_vcpu *vcpu) 1579 { 1580 unsigned long rip; 1581 1582 /* 1583 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on 1584 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be 1585 * set when EPT misconfig occurs. In practice, real hardware updates 1586 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors 1587 * (namely Hyper-V) don't set it due to it being undefined behavior, 1588 * i.e. we end up advancing IP with some random value. 1589 */ 1590 if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || 1591 to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) { 1592 rip = kvm_rip_read(vcpu); 1593 rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 1594 kvm_rip_write(vcpu, rip); 1595 } else { 1596 if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP)) 1597 return 0; 1598 } 1599 1600 /* skipping an emulated instruction also counts */ 1601 vmx_set_interrupt_shadow(vcpu, 0); 1602 1603 return 1; 1604 } 1605 1606 static void vmx_clear_hlt(struct kvm_vcpu *vcpu) 1607 { 1608 /* 1609 * Ensure that we clear the HLT state in the VMCS. 
We don't need to 1610 * explicitly skip the instruction because if the HLT state is set, 1611 * then the instruction is already executing and RIP has already been 1612 * advanced. 1613 */ 1614 if (kvm_hlt_in_guest(vcpu->kvm) && 1615 vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT) 1616 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); 1617 } 1618 1619 static void vmx_queue_exception(struct kvm_vcpu *vcpu) 1620 { 1621 struct vcpu_vmx *vmx = to_vmx(vcpu); 1622 unsigned nr = vcpu->arch.exception.nr; 1623 bool has_error_code = vcpu->arch.exception.has_error_code; 1624 u32 error_code = vcpu->arch.exception.error_code; 1625 u32 intr_info = nr | INTR_INFO_VALID_MASK; 1626 1627 kvm_deliver_exception_payload(vcpu); 1628 1629 if (has_error_code) { 1630 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 1631 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 1632 } 1633 1634 if (vmx->rmode.vm86_active) { 1635 int inc_eip = 0; 1636 if (kvm_exception_is_soft(nr)) 1637 inc_eip = vcpu->arch.event_exit_inst_len; 1638 kvm_inject_realmode_interrupt(vcpu, nr, inc_eip); 1639 return; 1640 } 1641 1642 WARN_ON_ONCE(vmx->emulation_required); 1643 1644 if (kvm_exception_is_soft(nr)) { 1645 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1646 vmx->vcpu.arch.event_exit_inst_len); 1647 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 1648 } else 1649 intr_info |= INTR_TYPE_HARD_EXCEPTION; 1650 1651 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); 1652 1653 vmx_clear_hlt(vcpu); 1654 } 1655 1656 static bool vmx_rdtscp_supported(void) 1657 { 1658 return cpu_has_vmx_rdtscp(); 1659 } 1660 1661 static bool vmx_invpcid_supported(void) 1662 { 1663 return cpu_has_vmx_invpcid(); 1664 } 1665 1666 /* 1667 * Swap MSR entry in host/guest MSR entry array. 1668 */ 1669 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) 1670 { 1671 struct shared_msr_entry tmp; 1672 1673 tmp = vmx->guest_msrs[to]; 1674 vmx->guest_msrs[to] = vmx->guest_msrs[from]; 1675 vmx->guest_msrs[from] = tmp; 1676 } 1677 1678 /* 1679 * Set up the vmcs to automatically save and restore system 1680 * msrs. Don't touch the 64-bit msrs if the guest is in legacy 1681 * mode, as fiddling with msrs is very expensive. 1682 */ 1683 static void setup_msrs(struct vcpu_vmx *vmx) 1684 { 1685 int save_nmsrs, index; 1686 1687 save_nmsrs = 0; 1688 #ifdef CONFIG_X86_64 1689 /* 1690 * The SYSCALL MSRs are only needed on long mode guests, and only 1691 * when EFER.SCE is set. 
1692 */ 1693 if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) { 1694 index = __find_msr_index(vmx, MSR_STAR); 1695 if (index >= 0) 1696 move_msr_up(vmx, index, save_nmsrs++); 1697 index = __find_msr_index(vmx, MSR_LSTAR); 1698 if (index >= 0) 1699 move_msr_up(vmx, index, save_nmsrs++); 1700 index = __find_msr_index(vmx, MSR_SYSCALL_MASK); 1701 if (index >= 0) 1702 move_msr_up(vmx, index, save_nmsrs++); 1703 } 1704 #endif 1705 index = __find_msr_index(vmx, MSR_EFER); 1706 if (index >= 0 && update_transition_efer(vmx, index)) 1707 move_msr_up(vmx, index, save_nmsrs++); 1708 index = __find_msr_index(vmx, MSR_TSC_AUX); 1709 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) 1710 move_msr_up(vmx, index, save_nmsrs++); 1711 index = __find_msr_index(vmx, MSR_IA32_TSX_CTRL); 1712 if (index >= 0) 1713 move_msr_up(vmx, index, save_nmsrs++); 1714 1715 vmx->save_nmsrs = save_nmsrs; 1716 vmx->guest_msrs_ready = false; 1717 1718 if (cpu_has_vmx_msr_bitmap()) 1719 vmx_update_msr_bitmap(&vmx->vcpu); 1720 } 1721 1722 static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) 1723 { 1724 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1725 1726 if (is_guest_mode(vcpu) && 1727 (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) 1728 return vcpu->arch.tsc_offset - vmcs12->tsc_offset; 1729 1730 return vcpu->arch.tsc_offset; 1731 } 1732 1733 static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 1734 { 1735 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1736 u64 g_tsc_offset = 0; 1737 1738 /* 1739 * We're here if L1 chose not to trap WRMSR to TSC. According 1740 * to the spec, this should set L1's TSC; The offset that L1 1741 * set for L2 remains unchanged, and still needs to be added 1742 * to the newly set TSC to get L2's TSC. 1743 */ 1744 if (is_guest_mode(vcpu) && 1745 (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) 1746 g_tsc_offset = vmcs12->tsc_offset; 1747 1748 trace_kvm_write_tsc_offset(vcpu->vcpu_id, 1749 vcpu->arch.tsc_offset - g_tsc_offset, 1750 offset); 1751 vmcs_write64(TSC_OFFSET, offset + g_tsc_offset); 1752 return offset + g_tsc_offset; 1753 } 1754 1755 /* 1756 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX 1757 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for 1758 * all guests if the "nested" module option is off, and can also be disabled 1759 * for a single guest by disabling its VMX cpuid bit. 1760 */ 1761 bool nested_vmx_allowed(struct kvm_vcpu *vcpu) 1762 { 1763 return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); 1764 } 1765 1766 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, 1767 uint64_t val) 1768 { 1769 uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; 1770 1771 return !(val & ~valid_bits); 1772 } 1773 1774 static int vmx_get_msr_feature(struct kvm_msr_entry *msr) 1775 { 1776 switch (msr->index) { 1777 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 1778 if (!nested) 1779 return 1; 1780 return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); 1781 default: 1782 return 1; 1783 } 1784 1785 return 0; 1786 } 1787 1788 /* 1789 * Reads an msr value (of 'msr_index') into 'pdata'. 1790 * Returns 0 on success, non-0 otherwise. 1791 * Assumes vcpu_load() was already called. 
1792 */ 1793 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1794 { 1795 struct vcpu_vmx *vmx = to_vmx(vcpu); 1796 struct shared_msr_entry *msr; 1797 u32 index; 1798 1799 switch (msr_info->index) { 1800 #ifdef CONFIG_X86_64 1801 case MSR_FS_BASE: 1802 msr_info->data = vmcs_readl(GUEST_FS_BASE); 1803 break; 1804 case MSR_GS_BASE: 1805 msr_info->data = vmcs_readl(GUEST_GS_BASE); 1806 break; 1807 case MSR_KERNEL_GS_BASE: 1808 msr_info->data = vmx_read_guest_kernel_gs_base(vmx); 1809 break; 1810 #endif 1811 case MSR_EFER: 1812 return kvm_get_msr_common(vcpu, msr_info); 1813 case MSR_IA32_TSX_CTRL: 1814 if (!msr_info->host_initiated && 1815 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) 1816 return 1; 1817 goto find_shared_msr; 1818 case MSR_IA32_UMWAIT_CONTROL: 1819 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) 1820 return 1; 1821 1822 msr_info->data = vmx->msr_ia32_umwait_control; 1823 break; 1824 case MSR_IA32_SPEC_CTRL: 1825 if (!msr_info->host_initiated && 1826 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) 1827 return 1; 1828 1829 msr_info->data = to_vmx(vcpu)->spec_ctrl; 1830 break; 1831 case MSR_IA32_SYSENTER_CS: 1832 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); 1833 break; 1834 case MSR_IA32_SYSENTER_EIP: 1835 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP); 1836 break; 1837 case MSR_IA32_SYSENTER_ESP: 1838 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); 1839 break; 1840 case MSR_IA32_BNDCFGS: 1841 if (!kvm_mpx_supported() || 1842 (!msr_info->host_initiated && 1843 !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) 1844 return 1; 1845 msr_info->data = vmcs_read64(GUEST_BNDCFGS); 1846 break; 1847 case MSR_IA32_MCG_EXT_CTL: 1848 if (!msr_info->host_initiated && 1849 !(vmx->msr_ia32_feature_control & 1850 FEATURE_CONTROL_LMCE)) 1851 return 1; 1852 msr_info->data = vcpu->arch.mcg_ext_ctl; 1853 break; 1854 case MSR_IA32_FEATURE_CONTROL: 1855 msr_info->data = vmx->msr_ia32_feature_control; 1856 break; 1857 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 1858 if (!nested_vmx_allowed(vcpu)) 1859 return 1; 1860 return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, 1861 &msr_info->data); 1862 case MSR_IA32_RTIT_CTL: 1863 if (pt_mode != PT_MODE_HOST_GUEST) 1864 return 1; 1865 msr_info->data = vmx->pt_desc.guest.ctl; 1866 break; 1867 case MSR_IA32_RTIT_STATUS: 1868 if (pt_mode != PT_MODE_HOST_GUEST) 1869 return 1; 1870 msr_info->data = vmx->pt_desc.guest.status; 1871 break; 1872 case MSR_IA32_RTIT_CR3_MATCH: 1873 if ((pt_mode != PT_MODE_HOST_GUEST) || 1874 !intel_pt_validate_cap(vmx->pt_desc.caps, 1875 PT_CAP_cr3_filtering)) 1876 return 1; 1877 msr_info->data = vmx->pt_desc.guest.cr3_match; 1878 break; 1879 case MSR_IA32_RTIT_OUTPUT_BASE: 1880 if ((pt_mode != PT_MODE_HOST_GUEST) || 1881 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1882 PT_CAP_topa_output) && 1883 !intel_pt_validate_cap(vmx->pt_desc.caps, 1884 PT_CAP_single_range_output))) 1885 return 1; 1886 msr_info->data = vmx->pt_desc.guest.output_base; 1887 break; 1888 case MSR_IA32_RTIT_OUTPUT_MASK: 1889 if ((pt_mode != PT_MODE_HOST_GUEST) || 1890 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1891 PT_CAP_topa_output) && 1892 !intel_pt_validate_cap(vmx->pt_desc.caps, 1893 PT_CAP_single_range_output))) 1894 return 1; 1895 msr_info->data = vmx->pt_desc.guest.output_mask; 1896 break; 1897 case MSR_IA32_RTIT_ADDR0_A ... 
MSR_IA32_RTIT_ADDR3_B: 1898 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; 1899 if ((pt_mode != PT_MODE_HOST_GUEST) || 1900 (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, 1901 PT_CAP_num_address_ranges))) 1902 return 1; 1903 if (index % 2) 1904 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; 1905 else 1906 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2]; 1907 break; 1908 case MSR_TSC_AUX: 1909 if (!msr_info->host_initiated && 1910 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) 1911 return 1; 1912 goto find_shared_msr; 1913 default: 1914 find_shared_msr: 1915 msr = find_msr_entry(vmx, msr_info->index); 1916 if (msr) { 1917 msr_info->data = msr->data; 1918 break; 1919 } 1920 return kvm_get_msr_common(vcpu, msr_info); 1921 } 1922 1923 return 0; 1924 } 1925 1926 /* 1927 * Writes msr value into into the appropriate "register". 1928 * Returns 0 on success, non-0 otherwise. 1929 * Assumes vcpu_load() was already called. 1930 */ 1931 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1932 { 1933 struct vcpu_vmx *vmx = to_vmx(vcpu); 1934 struct shared_msr_entry *msr; 1935 int ret = 0; 1936 u32 msr_index = msr_info->index; 1937 u64 data = msr_info->data; 1938 u32 index; 1939 1940 switch (msr_index) { 1941 case MSR_EFER: 1942 ret = kvm_set_msr_common(vcpu, msr_info); 1943 break; 1944 #ifdef CONFIG_X86_64 1945 case MSR_FS_BASE: 1946 vmx_segment_cache_clear(vmx); 1947 vmcs_writel(GUEST_FS_BASE, data); 1948 break; 1949 case MSR_GS_BASE: 1950 vmx_segment_cache_clear(vmx); 1951 vmcs_writel(GUEST_GS_BASE, data); 1952 break; 1953 case MSR_KERNEL_GS_BASE: 1954 vmx_write_guest_kernel_gs_base(vmx, data); 1955 break; 1956 #endif 1957 case MSR_IA32_SYSENTER_CS: 1958 if (is_guest_mode(vcpu)) 1959 get_vmcs12(vcpu)->guest_sysenter_cs = data; 1960 vmcs_write32(GUEST_SYSENTER_CS, data); 1961 break; 1962 case MSR_IA32_SYSENTER_EIP: 1963 if (is_guest_mode(vcpu)) 1964 get_vmcs12(vcpu)->guest_sysenter_eip = data; 1965 vmcs_writel(GUEST_SYSENTER_EIP, data); 1966 break; 1967 case MSR_IA32_SYSENTER_ESP: 1968 if (is_guest_mode(vcpu)) 1969 get_vmcs12(vcpu)->guest_sysenter_esp = data; 1970 vmcs_writel(GUEST_SYSENTER_ESP, data); 1971 break; 1972 case MSR_IA32_DEBUGCTLMSR: 1973 if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls & 1974 VM_EXIT_SAVE_DEBUG_CONTROLS) 1975 get_vmcs12(vcpu)->guest_ia32_debugctl = data; 1976 1977 ret = kvm_set_msr_common(vcpu, msr_info); 1978 break; 1979 1980 case MSR_IA32_BNDCFGS: 1981 if (!kvm_mpx_supported() || 1982 (!msr_info->host_initiated && 1983 !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) 1984 return 1; 1985 if (is_noncanonical_address(data & PAGE_MASK, vcpu) || 1986 (data & MSR_IA32_BNDCFGS_RSVD)) 1987 return 1; 1988 vmcs_write64(GUEST_BNDCFGS, data); 1989 break; 1990 case MSR_IA32_UMWAIT_CONTROL: 1991 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) 1992 return 1; 1993 1994 /* The reserved bit 1 and non-32 bit [63:32] should be zero */ 1995 if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) 1996 return 1; 1997 1998 vmx->msr_ia32_umwait_control = data; 1999 break; 2000 case MSR_IA32_SPEC_CTRL: 2001 if (!msr_info->host_initiated && 2002 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) 2003 return 1; 2004 2005 /* The STIBP bit doesn't fault even if it's not advertised */ 2006 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)) 2007 return 1; 2008 2009 vmx->spec_ctrl = data; 2010 2011 if (!data) 2012 break; 2013 2014 /* 2015 * For non-nested: 2016 * When it's written (to non-zero) for the first time, pass 2017 * it through. 
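 * Interception is dropped only after the first non-zero write
 * because unconditionally saving and restoring IA32_SPEC_CTRL
 * around every VM entry/exit would penalize guests that never
 * touch the MSR.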
2018 * 2019 * For nested: 2020 * The handling of the MSR bitmap for L2 guests is done in 2021 * nested_vmx_merge_msr_bitmap. We should not touch the 2022 * vmcs02.msr_bitmap here since it gets completely overwritten 2023 * in the merging. We update the vmcs01 here for L1 as well 2024 * since it will end up touching the MSR anyway now. 2025 */ 2026 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, 2027 MSR_IA32_SPEC_CTRL, 2028 MSR_TYPE_RW); 2029 break; 2030 case MSR_IA32_TSX_CTRL: 2031 if (!msr_info->host_initiated && 2032 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) 2033 return 1; 2034 if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR)) 2035 return 1; 2036 goto find_shared_msr; 2037 case MSR_IA32_PRED_CMD: 2038 if (!msr_info->host_initiated && 2039 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) 2040 return 1; 2041 2042 if (data & ~PRED_CMD_IBPB) 2043 return 1; 2044 2045 if (!data) 2046 break; 2047 2048 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); 2049 2050 /* 2051 * For non-nested: 2052 * When it's written (to non-zero) for the first time, pass 2053 * it through. 2054 * 2055 * For nested: 2056 * The handling of the MSR bitmap for L2 guests is done in 2057 * nested_vmx_merge_msr_bitmap. We should not touch the 2058 * vmcs02.msr_bitmap here since it gets completely overwritten 2059 * in the merging. 2060 */ 2061 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, 2062 MSR_TYPE_W); 2063 break; 2064 case MSR_IA32_CR_PAT: 2065 if (!kvm_pat_valid(data)) 2066 return 1; 2067 2068 if (is_guest_mode(vcpu) && 2069 get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) 2070 get_vmcs12(vcpu)->guest_ia32_pat = data; 2071 2072 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 2073 vmcs_write64(GUEST_IA32_PAT, data); 2074 vcpu->arch.pat = data; 2075 break; 2076 } 2077 ret = kvm_set_msr_common(vcpu, msr_info); 2078 break; 2079 case MSR_IA32_TSC_ADJUST: 2080 ret = kvm_set_msr_common(vcpu, msr_info); 2081 break; 2082 case MSR_IA32_MCG_EXT_CTL: 2083 if ((!msr_info->host_initiated && 2084 !(to_vmx(vcpu)->msr_ia32_feature_control & 2085 FEATURE_CONTROL_LMCE)) || 2086 (data & ~MCG_EXT_CTL_LMCE_EN)) 2087 return 1; 2088 vcpu->arch.mcg_ext_ctl = data; 2089 break; 2090 case MSR_IA32_FEATURE_CONTROL: 2091 if (!vmx_feature_control_msr_valid(vcpu, data) || 2092 (to_vmx(vcpu)->msr_ia32_feature_control & 2093 FEATURE_CONTROL_LOCKED && !msr_info->host_initiated)) 2094 return 1; 2095 vmx->msr_ia32_feature_control = data; 2096 if (msr_info->host_initiated && data == 0) 2097 vmx_leave_nested(vcpu); 2098 break; 2099 case MSR_IA32_VMX_BASIC ... 
MSR_IA32_VMX_VMFUNC: 2100 if (!msr_info->host_initiated) 2101 return 1; /* they are read-only */ 2102 if (!nested_vmx_allowed(vcpu)) 2103 return 1; 2104 return vmx_set_vmx_msr(vcpu, msr_index, data); 2105 case MSR_IA32_RTIT_CTL: 2106 if ((pt_mode != PT_MODE_HOST_GUEST) || 2107 vmx_rtit_ctl_check(vcpu, data) || 2108 vmx->nested.vmxon) 2109 return 1; 2110 vmcs_write64(GUEST_IA32_RTIT_CTL, data); 2111 vmx->pt_desc.guest.ctl = data; 2112 pt_update_intercept_for_msr(vmx); 2113 break; 2114 case MSR_IA32_RTIT_STATUS: 2115 if ((pt_mode != PT_MODE_HOST_GUEST) || 2116 (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || 2117 (data & MSR_IA32_RTIT_STATUS_MASK)) 2118 return 1; 2119 vmx->pt_desc.guest.status = data; 2120 break; 2121 case MSR_IA32_RTIT_CR3_MATCH: 2122 if ((pt_mode != PT_MODE_HOST_GUEST) || 2123 (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || 2124 !intel_pt_validate_cap(vmx->pt_desc.caps, 2125 PT_CAP_cr3_filtering)) 2126 return 1; 2127 vmx->pt_desc.guest.cr3_match = data; 2128 break; 2129 case MSR_IA32_RTIT_OUTPUT_BASE: 2130 if ((pt_mode != PT_MODE_HOST_GUEST) || 2131 (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || 2132 (!intel_pt_validate_cap(vmx->pt_desc.caps, 2133 PT_CAP_topa_output) && 2134 !intel_pt_validate_cap(vmx->pt_desc.caps, 2135 PT_CAP_single_range_output)) || 2136 (data & MSR_IA32_RTIT_OUTPUT_BASE_MASK)) 2137 return 1; 2138 vmx->pt_desc.guest.output_base = data; 2139 break; 2140 case MSR_IA32_RTIT_OUTPUT_MASK: 2141 if ((pt_mode != PT_MODE_HOST_GUEST) || 2142 (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || 2143 (!intel_pt_validate_cap(vmx->pt_desc.caps, 2144 PT_CAP_topa_output) && 2145 !intel_pt_validate_cap(vmx->pt_desc.caps, 2146 PT_CAP_single_range_output))) 2147 return 1; 2148 vmx->pt_desc.guest.output_mask = data; 2149 break; 2150 case MSR_IA32_RTIT_ADDR0_A ... 
MSR_IA32_RTIT_ADDR3_B: 2151 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; 2152 if ((pt_mode != PT_MODE_HOST_GUEST) || 2153 (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || 2154 (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, 2155 PT_CAP_num_address_ranges))) 2156 return 1; 2157 if (index % 2) 2158 vmx->pt_desc.guest.addr_b[index / 2] = data; 2159 else 2160 vmx->pt_desc.guest.addr_a[index / 2] = data; 2161 break; 2162 case MSR_TSC_AUX: 2163 if (!msr_info->host_initiated && 2164 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) 2165 return 1; 2166 /* Check reserved bit, higher 32 bits should be zero */ 2167 if ((data >> 32) != 0) 2168 return 1; 2169 goto find_shared_msr; 2170 2171 default: 2172 find_shared_msr: 2173 msr = find_msr_entry(vmx, msr_index); 2174 if (msr) 2175 ret = vmx_set_guest_msr(vmx, msr, data); 2176 else 2177 ret = kvm_set_msr_common(vcpu, msr_info); 2178 } 2179 2180 return ret; 2181 } 2182 2183 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) 2184 { 2185 kvm_register_mark_available(vcpu, reg); 2186 2187 switch (reg) { 2188 case VCPU_REGS_RSP: 2189 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); 2190 break; 2191 case VCPU_REGS_RIP: 2192 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); 2193 break; 2194 case VCPU_EXREG_PDPTR: 2195 if (enable_ept) 2196 ept_save_pdptrs(vcpu); 2197 break; 2198 case VCPU_EXREG_CR3: 2199 if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu))) 2200 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 2201 break; 2202 default: 2203 WARN_ON_ONCE(1); 2204 break; 2205 } 2206 } 2207 2208 static __init int cpu_has_kvm_support(void) 2209 { 2210 return cpu_has_vmx(); 2211 } 2212 2213 static __init int vmx_disabled_by_bios(void) 2214 { 2215 u64 msr; 2216 2217 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); 2218 if (msr & FEATURE_CONTROL_LOCKED) { 2219 /* launched w/ TXT and VMX disabled */ 2220 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) 2221 && tboot_enabled()) 2222 return 1; 2223 /* launched w/o TXT and VMX only enabled w/ TXT */ 2224 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) 2225 && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) 2226 && !tboot_enabled()) { 2227 printk(KERN_WARNING "kvm: disable TXT in the BIOS or " 2228 "activate TXT before enabling KVM\n"); 2229 return 1; 2230 } 2231 /* launched w/o TXT and VMX disabled */ 2232 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) 2233 && !tboot_enabled()) 2234 return 1; 2235 } 2236 2237 return 0; 2238 } 2239 2240 static void kvm_cpu_vmxon(u64 addr) 2241 { 2242 cr4_set_bits(X86_CR4_VMXE); 2243 intel_pt_handle_vmx(1); 2244 2245 asm volatile ("vmxon %0" : : "m"(addr)); 2246 } 2247 2248 static int hardware_enable(void) 2249 { 2250 int cpu = raw_smp_processor_id(); 2251 u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); 2252 u64 old, test_bits; 2253 2254 if (cr4_read_shadow() & X86_CR4_VMXE) 2255 return -EBUSY; 2256 2257 /* 2258 * This can happen if we hot-added a CPU but failed to allocate 2259 * VP assist page for it. 2260 */ 2261 if (static_branch_unlikely(&enable_evmcs) && 2262 !hv_get_vp_assist_page(cpu)) 2263 return -EFAULT; 2264 2265 INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); 2266 INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); 2267 spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); 2268 2269 /* 2270 * Now we can enable the vmclear operation in kdump 2271 * since the loaded_vmcss_on_cpu list on this cpu 2272 * has been initialized. 
2273 * 2274 * Though the cpu is not yet in VMX operation, it is safe to 2275 * enable the vmclear operation here because the 2276 * loaded_vmcss_on_cpu list is still empty. 2277 */ 2278 crash_enable_local_vmclear(cpu); 2279 2280 rdmsrl(MSR_IA32_FEATURE_CONTROL, old); 2281 2282 test_bits = FEATURE_CONTROL_LOCKED; 2283 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; 2284 if (tboot_enabled()) 2285 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; 2286 2287 if ((old & test_bits) != test_bits) { 2288 /* enable and lock */ 2289 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); 2290 } 2291 kvm_cpu_vmxon(phys_addr); 2292 if (enable_ept) 2293 ept_sync_global(); 2294 2295 return 0; 2296 } 2297 2298 static void vmclear_local_loaded_vmcss(void) 2299 { 2300 int cpu = raw_smp_processor_id(); 2301 struct loaded_vmcs *v, *n; 2302 2303 list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), 2304 loaded_vmcss_on_cpu_link) 2305 __loaded_vmcs_clear(v); 2306 } 2307 2308 2309 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() 2310 * tricks. 2311 */ 2312 static void kvm_cpu_vmxoff(void) 2313 { 2314 asm volatile (__ex("vmxoff")); 2315 2316 intel_pt_handle_vmx(0); 2317 cr4_clear_bits(X86_CR4_VMXE); 2318 } 2319 2320 static void hardware_disable(void) 2321 { 2322 vmclear_local_loaded_vmcss(); 2323 kvm_cpu_vmxoff(); 2324 } 2325 2326 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, 2327 u32 msr, u32 *result) 2328 { 2329 u32 vmx_msr_low, vmx_msr_high; 2330 u32 ctl = ctl_min | ctl_opt; 2331 2332 rdmsr(msr, vmx_msr_low, vmx_msr_high); 2333 2334 ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ 2335 ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ 2336 2337 /* Ensure the minimum (required) set of control bits is supported.
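 * If any bit required by ctl_min was forced off by the capability
 * MSR's allowed-1 mask above, the CPU cannot support KVM's baseline
 * VMCS configuration and the caller gets -EIO.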
*/ 2338 if (ctl_min & ~ctl) 2339 return -EIO; 2340 2341 *result = ctl; 2342 return 0; 2343 } 2344 2345 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, 2346 struct vmx_capability *vmx_cap) 2347 { 2348 u32 vmx_msr_low, vmx_msr_high; 2349 u32 min, opt, min2, opt2; 2350 u32 _pin_based_exec_control = 0; 2351 u32 _cpu_based_exec_control = 0; 2352 u32 _cpu_based_2nd_exec_control = 0; 2353 u32 _vmexit_control = 0; 2354 u32 _vmentry_control = 0; 2355 2356 memset(vmcs_conf, 0, sizeof(*vmcs_conf)); 2357 min = CPU_BASED_HLT_EXITING | 2358 #ifdef CONFIG_X86_64 2359 CPU_BASED_CR8_LOAD_EXITING | 2360 CPU_BASED_CR8_STORE_EXITING | 2361 #endif 2362 CPU_BASED_CR3_LOAD_EXITING | 2363 CPU_BASED_CR3_STORE_EXITING | 2364 CPU_BASED_UNCOND_IO_EXITING | 2365 CPU_BASED_MOV_DR_EXITING | 2366 CPU_BASED_USE_TSC_OFFSETING | 2367 CPU_BASED_MWAIT_EXITING | 2368 CPU_BASED_MONITOR_EXITING | 2369 CPU_BASED_INVLPG_EXITING | 2370 CPU_BASED_RDPMC_EXITING; 2371 2372 opt = CPU_BASED_TPR_SHADOW | 2373 CPU_BASED_USE_MSR_BITMAPS | 2374 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 2375 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, 2376 &_cpu_based_exec_control) < 0) 2377 return -EIO; 2378 #ifdef CONFIG_X86_64 2379 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) 2380 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & 2381 ~CPU_BASED_CR8_STORE_EXITING; 2382 #endif 2383 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { 2384 min2 = 0; 2385 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2386 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2387 SECONDARY_EXEC_WBINVD_EXITING | 2388 SECONDARY_EXEC_ENABLE_VPID | 2389 SECONDARY_EXEC_ENABLE_EPT | 2390 SECONDARY_EXEC_UNRESTRICTED_GUEST | 2391 SECONDARY_EXEC_PAUSE_LOOP_EXITING | 2392 SECONDARY_EXEC_DESC | 2393 SECONDARY_EXEC_RDTSCP | 2394 SECONDARY_EXEC_ENABLE_INVPCID | 2395 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2396 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2397 SECONDARY_EXEC_SHADOW_VMCS | 2398 SECONDARY_EXEC_XSAVES | 2399 SECONDARY_EXEC_RDSEED_EXITING | 2400 SECONDARY_EXEC_RDRAND_EXITING | 2401 SECONDARY_EXEC_ENABLE_PML | 2402 SECONDARY_EXEC_TSC_SCALING | 2403 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2404 SECONDARY_EXEC_PT_USE_GPA | 2405 SECONDARY_EXEC_PT_CONCEAL_VMX | 2406 SECONDARY_EXEC_ENABLE_VMFUNC | 2407 SECONDARY_EXEC_ENCLS_EXITING; 2408 if (adjust_vmx_controls(min2, opt2, 2409 MSR_IA32_VMX_PROCBASED_CTLS2, 2410 &_cpu_based_2nd_exec_control) < 0) 2411 return -EIO; 2412 } 2413 #ifndef CONFIG_X86_64 2414 if (!(_cpu_based_2nd_exec_control & 2415 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 2416 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; 2417 #endif 2418 2419 if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) 2420 _cpu_based_2nd_exec_control &= ~( 2421 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2422 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2423 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 2424 2425 rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, 2426 &vmx_cap->ept, &vmx_cap->vpid); 2427 2428 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { 2429 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT 2430 enabled */ 2431 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | 2432 CPU_BASED_CR3_STORE_EXITING | 2433 CPU_BASED_INVLPG_EXITING); 2434 } else if (vmx_cap->ept) { 2435 vmx_cap->ept = 0; 2436 pr_warn_once("EPT CAP should not exist if not support " 2437 "1-setting enable EPT VM-execution control\n"); 2438 } 2439 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) && 2440 vmx_cap->vpid) { 2441 vmx_cap->vpid = 0; 
2442 pr_warn_once("VPID CAP should not exist if not support " 2443 "1-setting enable VPID VM-execution control\n"); 2444 } 2445 2446 min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT; 2447 #ifdef CONFIG_X86_64 2448 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; 2449 #endif 2450 opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2451 VM_EXIT_LOAD_IA32_PAT | 2452 VM_EXIT_LOAD_IA32_EFER | 2453 VM_EXIT_CLEAR_BNDCFGS | 2454 VM_EXIT_PT_CONCEAL_PIP | 2455 VM_EXIT_CLEAR_IA32_RTIT_CTL; 2456 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, 2457 &_vmexit_control) < 0) 2458 return -EIO; 2459 2460 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; 2461 opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR | 2462 PIN_BASED_VMX_PREEMPTION_TIMER; 2463 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, 2464 &_pin_based_exec_control) < 0) 2465 return -EIO; 2466 2467 if (cpu_has_broken_vmx_preemption_timer()) 2468 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 2469 if (!(_cpu_based_2nd_exec_control & 2470 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)) 2471 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; 2472 2473 min = VM_ENTRY_LOAD_DEBUG_CONTROLS; 2474 opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | 2475 VM_ENTRY_LOAD_IA32_PAT | 2476 VM_ENTRY_LOAD_IA32_EFER | 2477 VM_ENTRY_LOAD_BNDCFGS | 2478 VM_ENTRY_PT_CONCEAL_PIP | 2479 VM_ENTRY_LOAD_IA32_RTIT_CTL; 2480 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, 2481 &_vmentry_control) < 0) 2482 return -EIO; 2483 2484 /* 2485 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they 2486 * can't be used due to an errata where VM Exit may incorrectly clear 2487 * IA32_PERF_GLOBAL_CTRL[34:32]. Workaround the errata by using the 2488 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL. 2489 */ 2490 if (boot_cpu_data.x86 == 0x6) { 2491 switch (boot_cpu_data.x86_model) { 2492 case 26: /* AAK155 */ 2493 case 30: /* AAP115 */ 2494 case 37: /* AAT100 */ 2495 case 44: /* BC86,AAY89,BD102 */ 2496 case 46: /* BA97 */ 2497 _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 2498 _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 2499 pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " 2500 "does not work properly. Using workaround\n"); 2501 break; 2502 default: 2503 break; 2504 } 2505 } 2506 2507 2508 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); 2509 2510 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ 2511 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) 2512 return -EIO; 2513 2514 #ifdef CONFIG_X86_64 2515 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ 2516 if (vmx_msr_high & (1u<<16)) 2517 return -EIO; 2518 #endif 2519 2520 /* Require Write-Back (WB) memory type for VMCS accesses. 
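 * Bits 53:50 of IA32_VMX_BASIC (bits 21:18 of vmx_msr_high) report
 * the memory type the processor uses for VMCS accesses; encoding 6
 * is write-back.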
*/ 2521 if (((vmx_msr_high >> 18) & 15) != 6) 2522 return -EIO; 2523 2524 vmcs_conf->size = vmx_msr_high & 0x1fff; 2525 vmcs_conf->order = get_order(vmcs_conf->size); 2526 vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; 2527 2528 vmcs_conf->revision_id = vmx_msr_low; 2529 2530 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; 2531 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; 2532 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; 2533 vmcs_conf->vmexit_ctrl = _vmexit_control; 2534 vmcs_conf->vmentry_ctrl = _vmentry_control; 2535 2536 if (static_branch_unlikely(&enable_evmcs)) 2537 evmcs_sanitize_exec_ctrls(vmcs_conf); 2538 2539 return 0; 2540 } 2541 2542 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) 2543 { 2544 int node = cpu_to_node(cpu); 2545 struct page *pages; 2546 struct vmcs *vmcs; 2547 2548 pages = __alloc_pages_node(node, flags, vmcs_config.order); 2549 if (!pages) 2550 return NULL; 2551 vmcs = page_address(pages); 2552 memset(vmcs, 0, vmcs_config.size); 2553 2554 /* KVM supports Enlightened VMCS v1 only */ 2555 if (static_branch_unlikely(&enable_evmcs)) 2556 vmcs->hdr.revision_id = KVM_EVMCS_VERSION; 2557 else 2558 vmcs->hdr.revision_id = vmcs_config.revision_id; 2559 2560 if (shadow) 2561 vmcs->hdr.shadow_vmcs = 1; 2562 return vmcs; 2563 } 2564 2565 void free_vmcs(struct vmcs *vmcs) 2566 { 2567 free_pages((unsigned long)vmcs, vmcs_config.order); 2568 } 2569 2570 /* 2571 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded 2572 */ 2573 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) 2574 { 2575 if (!loaded_vmcs->vmcs) 2576 return; 2577 loaded_vmcs_clear(loaded_vmcs); 2578 free_vmcs(loaded_vmcs->vmcs); 2579 loaded_vmcs->vmcs = NULL; 2580 if (loaded_vmcs->msr_bitmap) 2581 free_page((unsigned long)loaded_vmcs->msr_bitmap); 2582 WARN_ON(loaded_vmcs->shadow_vmcs != NULL); 2583 } 2584 2585 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) 2586 { 2587 loaded_vmcs->vmcs = alloc_vmcs(false); 2588 if (!loaded_vmcs->vmcs) 2589 return -ENOMEM; 2590 2591 loaded_vmcs->shadow_vmcs = NULL; 2592 loaded_vmcs->hv_timer_soft_disabled = false; 2593 loaded_vmcs_init(loaded_vmcs); 2594 2595 if (cpu_has_vmx_msr_bitmap()) { 2596 loaded_vmcs->msr_bitmap = (unsigned long *) 2597 __get_free_page(GFP_KERNEL_ACCOUNT); 2598 if (!loaded_vmcs->msr_bitmap) 2599 goto out_vmcs; 2600 memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); 2601 2602 if (IS_ENABLED(CONFIG_HYPERV) && 2603 static_branch_unlikely(&enable_evmcs) && 2604 (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { 2605 struct hv_enlightened_vmcs *evmcs = 2606 (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; 2607 2608 evmcs->hv_enlightenments_control.msr_bitmap = 1; 2609 } 2610 } 2611 2612 memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); 2613 memset(&loaded_vmcs->controls_shadow, 0, 2614 sizeof(struct vmcs_controls_shadow)); 2615 2616 return 0; 2617 2618 out_vmcs: 2619 free_loaded_vmcs(loaded_vmcs); 2620 return -ENOMEM; 2621 } 2622 2623 static void free_kvm_area(void) 2624 { 2625 int cpu; 2626 2627 for_each_possible_cpu(cpu) { 2628 free_vmcs(per_cpu(vmxarea, cpu)); 2629 per_cpu(vmxarea, cpu) = NULL; 2630 } 2631 } 2632 2633 static __init int alloc_kvm_area(void) 2634 { 2635 int cpu; 2636 2637 for_each_possible_cpu(cpu) { 2638 struct vmcs *vmcs; 2639 2640 vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL); 2641 if (!vmcs) { 2642 free_kvm_area(); 2643 return -ENOMEM; 2644 } 2645 2646 /* 2647 * When eVMCS is enabled, alloc_vmcs_cpu() sets 2648 * 
vmcs->revision_id to KVM_EVMCS_VERSION instead of 2649 * revision_id reported by MSR_IA32_VMX_BASIC. 2650 * 2651 * However, even though not explicitly documented by 2652 * TLFS, VMXArea passed as VMXON argument should 2653 * still be marked with revision_id reported by 2654 * physical CPU. 2655 */ 2656 if (static_branch_unlikely(&enable_evmcs)) 2657 vmcs->hdr.revision_id = vmcs_config.revision_id; 2658 2659 per_cpu(vmxarea, cpu) = vmcs; 2660 } 2661 return 0; 2662 } 2663 2664 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, 2665 struct kvm_segment *save) 2666 { 2667 if (!emulate_invalid_guest_state) { 2668 /* 2669 * CS and SS RPL should be equal during guest entry according 2670 * to VMX spec, but in reality it is not always so. Since vcpu 2671 * is in the middle of the transition from real mode to 2672 * protected mode it is safe to assume that RPL 0 is a good 2673 * default value. 2674 */ 2675 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) 2676 save->selector &= ~SEGMENT_RPL_MASK; 2677 save->dpl = save->selector & SEGMENT_RPL_MASK; 2678 save->s = 1; 2679 } 2680 vmx_set_segment(vcpu, save, seg); 2681 } 2682 2683 static void enter_pmode(struct kvm_vcpu *vcpu) 2684 { 2685 unsigned long flags; 2686 struct vcpu_vmx *vmx = to_vmx(vcpu); 2687 2688 /* 2689 * Update real mode segment cache. It may be not up-to-date if sement 2690 * register was written while vcpu was in a guest mode. 2691 */ 2692 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 2693 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 2694 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 2695 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 2696 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 2697 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 2698 2699 vmx->rmode.vm86_active = 0; 2700 2701 vmx_segment_cache_clear(vmx); 2702 2703 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 2704 2705 flags = vmcs_readl(GUEST_RFLAGS); 2706 flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; 2707 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; 2708 vmcs_writel(GUEST_RFLAGS, flags); 2709 2710 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | 2711 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); 2712 2713 update_exception_bitmap(vcpu); 2714 2715 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 2716 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 2717 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 2718 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 2719 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 2720 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 2721 } 2722 2723 static void fix_rmode_seg(int seg, struct kvm_segment *save) 2724 { 2725 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 2726 struct kvm_segment var = *save; 2727 2728 var.dpl = 0x3; 2729 if (seg == VCPU_SREG_CS) 2730 var.type = 0x3; 2731 2732 if (!emulate_invalid_guest_state) { 2733 var.selector = var.base >> 4; 2734 var.base = var.base & 0xffff0; 2735 var.limit = 0xffff; 2736 var.g = 0; 2737 var.db = 0; 2738 var.present = 1; 2739 var.s = 1; 2740 var.l = 0; 2741 var.unusable = 0; 2742 var.type = 0x3; 2743 var.avl = 0; 2744 if (save->base & 0xf) 2745 printk_once(KERN_WARNING "kvm: segment base is not " 2746 "paragraph aligned when entering " 2747 "protected mode (seg=%d)", seg); 2748 } 
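/*
 * When invalid guest state is not being emulated, the fixup above
 * guarantees base == selector << 4 with a 64KiB byte-granular DPL-3
 * segment, which is what the virtual-8086 guest-state checks expect.
 */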
2749 2750 vmcs_write16(sf->selector, var.selector); 2751 vmcs_writel(sf->base, var.base); 2752 vmcs_write32(sf->limit, var.limit); 2753 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); 2754 } 2755 2756 static void enter_rmode(struct kvm_vcpu *vcpu) 2757 { 2758 unsigned long flags; 2759 struct vcpu_vmx *vmx = to_vmx(vcpu); 2760 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); 2761 2762 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 2763 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 2764 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 2765 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 2766 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 2767 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 2768 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 2769 2770 vmx->rmode.vm86_active = 1; 2771 2772 /* 2773 * Very old userspace does not call KVM_SET_TSS_ADDR before entering 2774 * vcpu. Warn the user that an update is overdue. 2775 */ 2776 if (!kvm_vmx->tss_addr) 2777 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be " 2778 "called before entering vcpu\n"); 2779 2780 vmx_segment_cache_clear(vmx); 2781 2782 vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr); 2783 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); 2784 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 2785 2786 flags = vmcs_readl(GUEST_RFLAGS); 2787 vmx->rmode.save_rflags = flags; 2788 2789 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 2790 2791 vmcs_writel(GUEST_RFLAGS, flags); 2792 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); 2793 update_exception_bitmap(vcpu); 2794 2795 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 2796 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 2797 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 2798 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 2799 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 2800 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 2801 2802 kvm_mmu_reset_context(vcpu); 2803 } 2804 2805 void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) 2806 { 2807 struct vcpu_vmx *vmx = to_vmx(vcpu); 2808 struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); 2809 2810 if (!msr) 2811 return; 2812 2813 vcpu->arch.efer = efer; 2814 if (efer & EFER_LMA) { 2815 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2816 msr->data = efer; 2817 } else { 2818 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2819 2820 msr->data = efer & ~EFER_LME; 2821 } 2822 setup_msrs(vmx); 2823 } 2824 2825 #ifdef CONFIG_X86_64 2826 2827 static void enter_lmode(struct kvm_vcpu *vcpu) 2828 { 2829 u32 guest_tr_ar; 2830 2831 vmx_segment_cache_clear(to_vmx(vcpu)); 2832 2833 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); 2834 if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { 2835 pr_debug_ratelimited("%s: tss fixup for long mode. 
\n", 2836 __func__); 2837 vmcs_write32(GUEST_TR_AR_BYTES, 2838 (guest_tr_ar & ~VMX_AR_TYPE_MASK) 2839 | VMX_AR_TYPE_BUSY_64_TSS); 2840 } 2841 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); 2842 } 2843 2844 static void exit_lmode(struct kvm_vcpu *vcpu) 2845 { 2846 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2847 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); 2848 } 2849 2850 #endif 2851 2852 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr) 2853 { 2854 int vpid = to_vmx(vcpu)->vpid; 2855 2856 if (!vpid_sync_vcpu_addr(vpid, addr)) 2857 vpid_sync_context(vpid); 2858 2859 /* 2860 * If VPIDs are not supported or enabled, then the above is a no-op. 2861 * But we don't really need a TLB flush in that case anyway, because 2862 * each VM entry/exit includes an implicit flush when VPID is 0. 2863 */ 2864 } 2865 2866 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) 2867 { 2868 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; 2869 2870 vcpu->arch.cr0 &= ~cr0_guest_owned_bits; 2871 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; 2872 } 2873 2874 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) 2875 { 2876 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; 2877 2878 vcpu->arch.cr4 &= ~cr4_guest_owned_bits; 2879 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; 2880 } 2881 2882 static void ept_load_pdptrs(struct kvm_vcpu *vcpu) 2883 { 2884 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 2885 2886 if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR)) 2887 return; 2888 2889 if (is_pae_paging(vcpu)) { 2890 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); 2891 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); 2892 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); 2893 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); 2894 } 2895 } 2896 2897 void ept_save_pdptrs(struct kvm_vcpu *vcpu) 2898 { 2899 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 2900 2901 if (is_pae_paging(vcpu)) { 2902 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); 2903 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 2904 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); 2905 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); 2906 } 2907 2908 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); 2909 } 2910 2911 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, 2912 unsigned long cr0, 2913 struct kvm_vcpu *vcpu) 2914 { 2915 struct vcpu_vmx *vmx = to_vmx(vcpu); 2916 2917 if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) 2918 vmx_cache_reg(vcpu, VCPU_EXREG_CR3); 2919 if (!(cr0 & X86_CR0_PG)) { 2920 /* From paging/starting to nonpaging */ 2921 exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING | 2922 CPU_BASED_CR3_STORE_EXITING); 2923 vcpu->arch.cr0 = cr0; 2924 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); 2925 } else if (!is_paging(vcpu)) { 2926 /* From nonpaging to paging */ 2927 exec_controls_clearbit(vmx, CPU_BASED_CR3_LOAD_EXITING | 2928 CPU_BASED_CR3_STORE_EXITING); 2929 vcpu->arch.cr0 = cr0; 2930 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); 2931 } 2932 2933 if (!(cr0 & X86_CR0_WP)) 2934 *hw_cr0 &= ~X86_CR0_WP; 2935 } 2936 2937 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 2938 { 2939 struct vcpu_vmx *vmx = to_vmx(vcpu); 2940 unsigned long hw_cr0; 2941 2942 hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); 2943 if (enable_unrestricted_guest) 2944 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; 2945 else { 2946 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; 2947 2948 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) 2949 enter_pmode(vcpu); 2950 2951 if (!vmx->rmode.vm86_active && !(cr0 & 
X86_CR0_PE)) 2952 enter_rmode(vcpu); 2953 } 2954 2955 #ifdef CONFIG_X86_64 2956 if (vcpu->arch.efer & EFER_LME) { 2957 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) 2958 enter_lmode(vcpu); 2959 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) 2960 exit_lmode(vcpu); 2961 } 2962 #endif 2963 2964 if (enable_ept && !enable_unrestricted_guest) 2965 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); 2966 2967 vmcs_writel(CR0_READ_SHADOW, cr0); 2968 vmcs_writel(GUEST_CR0, hw_cr0); 2969 vcpu->arch.cr0 = cr0; 2970 2971 /* depends on vcpu->arch.cr0 to be set to a new value */ 2972 vmx->emulation_required = emulation_required(vcpu); 2973 } 2974 2975 static int get_ept_level(struct kvm_vcpu *vcpu) 2976 { 2977 if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48)) 2978 return 5; 2979 return 4; 2980 } 2981 2982 u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa) 2983 { 2984 u64 eptp = VMX_EPTP_MT_WB; 2985 2986 eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; 2987 2988 if (enable_ept_ad_bits && 2989 (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) 2990 eptp |= VMX_EPTP_AD_ENABLE_BIT; 2991 eptp |= (root_hpa & PAGE_MASK); 2992 2993 return eptp; 2994 } 2995 2996 void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 2997 { 2998 struct kvm *kvm = vcpu->kvm; 2999 bool update_guest_cr3 = true; 3000 unsigned long guest_cr3; 3001 u64 eptp; 3002 3003 guest_cr3 = cr3; 3004 if (enable_ept) { 3005 eptp = construct_eptp(vcpu, cr3); 3006 vmcs_write64(EPT_POINTER, eptp); 3007 3008 if (kvm_x86_ops->tlb_remote_flush) { 3009 spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); 3010 to_vmx(vcpu)->ept_pointer = eptp; 3011 to_kvm_vmx(kvm)->ept_pointers_match 3012 = EPT_POINTERS_CHECK; 3013 spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); 3014 } 3015 3016 /* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */ 3017 if (is_guest_mode(vcpu)) 3018 update_guest_cr3 = false; 3019 else if (!enable_unrestricted_guest && !is_paging(vcpu)) 3020 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; 3021 else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) 3022 guest_cr3 = vcpu->arch.cr3; 3023 else /* vmcs01.GUEST_CR3 is already up-to-date. */ 3024 update_guest_cr3 = false; 3025 ept_load_pdptrs(vcpu); 3026 } 3027 3028 if (update_guest_cr3) 3029 vmcs_writel(GUEST_CR3, guest_cr3); 3030 } 3031 3032 int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 3033 { 3034 struct vcpu_vmx *vmx = to_vmx(vcpu); 3035 /* 3036 * Pass through host's Machine Check Enable value to hw_cr4, which 3037 * is in force while we are in guest mode. Do not let guests control 3038 * this bit, even if host CR4.MCE == 0. 3039 */ 3040 unsigned long hw_cr4; 3041 3042 hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE); 3043 if (enable_unrestricted_guest) 3044 hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST; 3045 else if (vmx->rmode.vm86_active) 3046 hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON; 3047 else 3048 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON; 3049 3050 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) { 3051 if (cr4 & X86_CR4_UMIP) { 3052 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC); 3053 hw_cr4 &= ~X86_CR4_UMIP; 3054 } else if (!is_guest_mode(vcpu) || 3055 !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) { 3056 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC); 3057 } 3058 } 3059 3060 if (cr4 & X86_CR4_VMXE) { 3061 /* 3062 * To use VMXON (and later other VMX instructions), a guest 3063 * must first be able to turn on cr4.VMXE (see handle_vmon()). 
3064 * So basically the check on whether to allow nested VMX 3065 * is here. We operate under the default treatment of SMM, 3066 * so VMX cannot be enabled under SMM. 3067 */ 3068 if (!nested_vmx_allowed(vcpu) || is_smm(vcpu)) 3069 return 1; 3070 } 3071 3072 if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) 3073 return 1; 3074 3075 vcpu->arch.cr4 = cr4; 3076 3077 if (!enable_unrestricted_guest) { 3078 if (enable_ept) { 3079 if (!is_paging(vcpu)) { 3080 hw_cr4 &= ~X86_CR4_PAE; 3081 hw_cr4 |= X86_CR4_PSE; 3082 } else if (!(cr4 & X86_CR4_PAE)) { 3083 hw_cr4 &= ~X86_CR4_PAE; 3084 } 3085 } 3086 3087 /* 3088 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in 3089 * hardware. To emulate this behavior, SMEP/SMAP/PKU needs 3090 * to be manually disabled when guest switches to non-paging 3091 * mode. 3092 * 3093 * If !enable_unrestricted_guest, the CPU is always running 3094 * with CR0.PG=1 and CR4 needs to be modified. 3095 * If enable_unrestricted_guest, the CPU automatically 3096 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. 3097 */ 3098 if (!is_paging(vcpu)) 3099 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); 3100 } 3101 3102 vmcs_writel(CR4_READ_SHADOW, cr4); 3103 vmcs_writel(GUEST_CR4, hw_cr4); 3104 return 0; 3105 } 3106 3107 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3108 { 3109 struct vcpu_vmx *vmx = to_vmx(vcpu); 3110 u32 ar; 3111 3112 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 3113 *var = vmx->rmode.segs[seg]; 3114 if (seg == VCPU_SREG_TR 3115 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) 3116 return; 3117 var->base = vmx_read_guest_seg_base(vmx, seg); 3118 var->selector = vmx_read_guest_seg_selector(vmx, seg); 3119 return; 3120 } 3121 var->base = vmx_read_guest_seg_base(vmx, seg); 3122 var->limit = vmx_read_guest_seg_limit(vmx, seg); 3123 var->selector = vmx_read_guest_seg_selector(vmx, seg); 3124 ar = vmx_read_guest_seg_ar(vmx, seg); 3125 var->unusable = (ar >> 16) & 1; 3126 var->type = ar & 15; 3127 var->s = (ar >> 4) & 1; 3128 var->dpl = (ar >> 5) & 3; 3129 /* 3130 * Some userspaces do not preserve unusable property. Since usable 3131 * segment has to be present according to VMX spec we can use present 3132 * property to amend userspace bug by making unusable segment always 3133 * nonpresent. vmx_segment_access_rights() already marks nonpresent 3134 * segment as unusable. 
3135 */ 3136 var->present = !var->unusable; 3137 var->avl = (ar >> 12) & 1; 3138 var->l = (ar >> 13) & 1; 3139 var->db = (ar >> 14) & 1; 3140 var->g = (ar >> 15) & 1; 3141 } 3142 3143 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) 3144 { 3145 struct kvm_segment s; 3146 3147 if (to_vmx(vcpu)->rmode.vm86_active) { 3148 vmx_get_segment(vcpu, &s, seg); 3149 return s.base; 3150 } 3151 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); 3152 } 3153 3154 int vmx_get_cpl(struct kvm_vcpu *vcpu) 3155 { 3156 struct vcpu_vmx *vmx = to_vmx(vcpu); 3157 3158 if (unlikely(vmx->rmode.vm86_active)) 3159 return 0; 3160 else { 3161 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); 3162 return VMX_AR_DPL(ar); 3163 } 3164 } 3165 3166 static u32 vmx_segment_access_rights(struct kvm_segment *var) 3167 { 3168 u32 ar; 3169 3170 if (var->unusable || !var->present) 3171 ar = 1 << 16; 3172 else { 3173 ar = var->type & 15; 3174 ar |= (var->s & 1) << 4; 3175 ar |= (var->dpl & 3) << 5; 3176 ar |= (var->present & 1) << 7; 3177 ar |= (var->avl & 1) << 12; 3178 ar |= (var->l & 1) << 13; 3179 ar |= (var->db & 1) << 14; 3180 ar |= (var->g & 1) << 15; 3181 } 3182 3183 return ar; 3184 } 3185 3186 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3187 { 3188 struct vcpu_vmx *vmx = to_vmx(vcpu); 3189 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 3190 3191 vmx_segment_cache_clear(vmx); 3192 3193 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 3194 vmx->rmode.segs[seg] = *var; 3195 if (seg == VCPU_SREG_TR) 3196 vmcs_write16(sf->selector, var->selector); 3197 else if (var->s) 3198 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); 3199 goto out; 3200 } 3201 3202 vmcs_writel(sf->base, var->base); 3203 vmcs_write32(sf->limit, var->limit); 3204 vmcs_write16(sf->selector, var->selector); 3205 3206 /* 3207 * Fix the "Accessed" bit in AR field of segment registers for older 3208 * qemu binaries. 3209 * IA32 arch specifies that at the time of processor reset the 3210 * "Accessed" bit in the AR field of segment registers is 1. And qemu 3211 * is setting it to 0 in the userland code. This causes invalid guest 3212 * state vmexit when "unrestricted guest" mode is turned on. 3213 * Fix for this setup issue in cpu_reset is being pushed in the qemu 3214 * tree. Newer qemu binaries with that qemu fix would not need this 3215 * kvm hack. 
3216 */ 3217 if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) 3218 var->type |= 0x1; /* Accessed */ 3219 3220 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); 3221 3222 out: 3223 vmx->emulation_required = emulation_required(vcpu); 3224 } 3225 3226 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 3227 { 3228 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); 3229 3230 *db = (ar >> 14) & 1; 3231 *l = (ar >> 13) & 1; 3232 } 3233 3234 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3235 { 3236 dt->size = vmcs_read32(GUEST_IDTR_LIMIT); 3237 dt->address = vmcs_readl(GUEST_IDTR_BASE); 3238 } 3239 3240 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3241 { 3242 vmcs_write32(GUEST_IDTR_LIMIT, dt->size); 3243 vmcs_writel(GUEST_IDTR_BASE, dt->address); 3244 } 3245 3246 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3247 { 3248 dt->size = vmcs_read32(GUEST_GDTR_LIMIT); 3249 dt->address = vmcs_readl(GUEST_GDTR_BASE); 3250 } 3251 3252 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3253 { 3254 vmcs_write32(GUEST_GDTR_LIMIT, dt->size); 3255 vmcs_writel(GUEST_GDTR_BASE, dt->address); 3256 } 3257 3258 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) 3259 { 3260 struct kvm_segment var; 3261 u32 ar; 3262 3263 vmx_get_segment(vcpu, &var, seg); 3264 var.dpl = 0x3; 3265 if (seg == VCPU_SREG_CS) 3266 var.type = 0x3; 3267 ar = vmx_segment_access_rights(&var); 3268 3269 if (var.base != (var.selector << 4)) 3270 return false; 3271 if (var.limit != 0xffff) 3272 return false; 3273 if (ar != 0xf3) 3274 return false; 3275 3276 return true; 3277 } 3278 3279 static bool code_segment_valid(struct kvm_vcpu *vcpu) 3280 { 3281 struct kvm_segment cs; 3282 unsigned int cs_rpl; 3283 3284 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 3285 cs_rpl = cs.selector & SEGMENT_RPL_MASK; 3286 3287 if (cs.unusable) 3288 return false; 3289 if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) 3290 return false; 3291 if (!cs.s) 3292 return false; 3293 if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { 3294 if (cs.dpl > cs_rpl) 3295 return false; 3296 } else { 3297 if (cs.dpl != cs_rpl) 3298 return false; 3299 } 3300 if (!cs.present) 3301 return false; 3302 3303 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ 3304 return true; 3305 } 3306 3307 static bool stack_segment_valid(struct kvm_vcpu *vcpu) 3308 { 3309 struct kvm_segment ss; 3310 unsigned int ss_rpl; 3311 3312 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 3313 ss_rpl = ss.selector & SEGMENT_RPL_MASK; 3314 3315 if (ss.unusable) 3316 return true; 3317 if (ss.type != 3 && ss.type != 7) 3318 return false; 3319 if (!ss.s) 3320 return false; 3321 if (ss.dpl != ss_rpl) /* DPL != RPL */ 3322 return false; 3323 if (!ss.present) 3324 return false; 3325 3326 return true; 3327 } 3328 3329 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) 3330 { 3331 struct kvm_segment var; 3332 unsigned int rpl; 3333 3334 vmx_get_segment(vcpu, &var, seg); 3335 rpl = var.selector & SEGMENT_RPL_MASK; 3336 3337 if (var.unusable) 3338 return true; 3339 if (!var.s) 3340 return false; 3341 if (!var.present) 3342 return false; 3343 if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { 3344 if (var.dpl < rpl) /* DPL < RPL */ 3345 return false; 3346 } 3347 3348 /* TODO: Add other members to kvm_segment_field to allow checking for other access 3349 * rights flags 3350 */ 3351 return true; 3352 } 
3353 3354 static bool tr_valid(struct kvm_vcpu *vcpu) 3355 { 3356 struct kvm_segment tr; 3357 3358 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); 3359 3360 if (tr.unusable) 3361 return false; 3362 if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 3363 return false; 3364 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ 3365 return false; 3366 if (!tr.present) 3367 return false; 3368 3369 return true; 3370 } 3371 3372 static bool ldtr_valid(struct kvm_vcpu *vcpu) 3373 { 3374 struct kvm_segment ldtr; 3375 3376 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); 3377 3378 if (ldtr.unusable) 3379 return true; 3380 if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 3381 return false; 3382 if (ldtr.type != 2) 3383 return false; 3384 if (!ldtr.present) 3385 return false; 3386 3387 return true; 3388 } 3389 3390 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) 3391 { 3392 struct kvm_segment cs, ss; 3393 3394 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 3395 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 3396 3397 return ((cs.selector & SEGMENT_RPL_MASK) == 3398 (ss.selector & SEGMENT_RPL_MASK)); 3399 } 3400 3401 /* 3402 * Check if guest state is valid. Returns true if valid, false if 3403 * not. 3404 * We assume that registers are always usable 3405 */ 3406 static bool guest_state_valid(struct kvm_vcpu *vcpu) 3407 { 3408 if (enable_unrestricted_guest) 3409 return true; 3410 3411 /* real mode guest state checks */ 3412 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { 3413 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) 3414 return false; 3415 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) 3416 return false; 3417 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) 3418 return false; 3419 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) 3420 return false; 3421 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) 3422 return false; 3423 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) 3424 return false; 3425 } else { 3426 /* protected mode guest state checks */ 3427 if (!cs_ss_rpl_check(vcpu)) 3428 return false; 3429 if (!code_segment_valid(vcpu)) 3430 return false; 3431 if (!stack_segment_valid(vcpu)) 3432 return false; 3433 if (!data_segment_valid(vcpu, VCPU_SREG_DS)) 3434 return false; 3435 if (!data_segment_valid(vcpu, VCPU_SREG_ES)) 3436 return false; 3437 if (!data_segment_valid(vcpu, VCPU_SREG_FS)) 3438 return false; 3439 if (!data_segment_valid(vcpu, VCPU_SREG_GS)) 3440 return false; 3441 if (!tr_valid(vcpu)) 3442 return false; 3443 if (!ldtr_valid(vcpu)) 3444 return false; 3445 } 3446 /* TODO: 3447 * - Add checks on RIP 3448 * - Add checks on RFLAGS 3449 */ 3450 3451 return true; 3452 } 3453 3454 static int init_rmode_tss(struct kvm *kvm) 3455 { 3456 gfn_t fn; 3457 u16 data = 0; 3458 int idx, r; 3459 3460 idx = srcu_read_lock(&kvm->srcu); 3461 fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT; 3462 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); 3463 if (r < 0) 3464 goto out; 3465 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; 3466 r = kvm_write_guest_page(kvm, fn++, &data, 3467 TSS_IOPB_BASE_OFFSET, sizeof(u16)); 3468 if (r < 0) 3469 goto out; 3470 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); 3471 if (r < 0) 3472 goto out; 3473 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); 3474 if (r < 0) 3475 goto out; 3476 data = ~0; 3477 r = kvm_write_guest_page(kvm, fn, &data, 3478 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, 3479 sizeof(u8)); 3480 out: 3481 srcu_read_unlock(&kvm->srcu, idx); 3482 return r; 3483 } 3484 3485 static int init_rmode_identity_map(struct kvm *kvm) 3486 { 3487 struct kvm_vmx 
*kvm_vmx = to_kvm_vmx(kvm); 3488 int i, idx, r = 0; 3489 kvm_pfn_t identity_map_pfn; 3490 u32 tmp; 3491 3492 /* Protect kvm_vmx->ept_identity_pagetable_done. */ 3493 mutex_lock(&kvm->slots_lock); 3494 3495 if (likely(kvm_vmx->ept_identity_pagetable_done)) 3496 goto out2; 3497 3498 if (!kvm_vmx->ept_identity_map_addr) 3499 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; 3500 identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT; 3501 3502 r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 3503 kvm_vmx->ept_identity_map_addr, PAGE_SIZE); 3504 if (r < 0) 3505 goto out2; 3506 3507 idx = srcu_read_lock(&kvm->srcu); 3508 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); 3509 if (r < 0) 3510 goto out; 3511 /* Set up identity-mapping pagetable for EPT in real mode */ 3512 for (i = 0; i < PT32_ENT_PER_PAGE; i++) { 3513 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | 3514 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); 3515 r = kvm_write_guest_page(kvm, identity_map_pfn, 3516 &tmp, i * sizeof(tmp), sizeof(tmp)); 3517 if (r < 0) 3518 goto out; 3519 } 3520 kvm_vmx->ept_identity_pagetable_done = true; 3521 3522 out: 3523 srcu_read_unlock(&kvm->srcu, idx); 3524 3525 out2: 3526 mutex_unlock(&kvm->slots_lock); 3527 return r; 3528 } 3529 3530 static void seg_setup(int seg) 3531 { 3532 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 3533 unsigned int ar; 3534 3535 vmcs_write16(sf->selector, 0); 3536 vmcs_writel(sf->base, 0); 3537 vmcs_write32(sf->limit, 0xffff); 3538 ar = 0x93; 3539 if (seg == VCPU_SREG_CS) 3540 ar |= 0x08; /* code segment */ 3541 3542 vmcs_write32(sf->ar_bytes, ar); 3543 } 3544 3545 static int alloc_apic_access_page(struct kvm *kvm) 3546 { 3547 struct page *page; 3548 int r = 0; 3549 3550 mutex_lock(&kvm->slots_lock); 3551 if (kvm->arch.apic_access_page_done) 3552 goto out; 3553 r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 3554 APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); 3555 if (r) 3556 goto out; 3557 3558 page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 3559 if (is_error_page(page)) { 3560 r = -EFAULT; 3561 goto out; 3562 } 3563 3564 /* 3565 * Do not pin the page in memory, so that memory hot-unplug 3566 * is able to migrate it. 3567 */ 3568 put_page(page); 3569 kvm->arch.apic_access_page_done = true; 3570 out: 3571 mutex_unlock(&kvm->slots_lock); 3572 return r; 3573 } 3574 3575 int allocate_vpid(void) 3576 { 3577 int vpid; 3578 3579 if (!enable_vpid) 3580 return 0; 3581 spin_lock(&vmx_vpid_lock); 3582 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); 3583 if (vpid < VMX_NR_VPIDS) 3584 __set_bit(vpid, vmx_vpid_bitmap); 3585 else 3586 vpid = 0; 3587 spin_unlock(&vmx_vpid_lock); 3588 return vpid; 3589 } 3590 3591 void free_vpid(int vpid) 3592 { 3593 if (!enable_vpid || vpid == 0) 3594 return; 3595 spin_lock(&vmx_vpid_lock); 3596 __clear_bit(vpid, vmx_vpid_bitmap); 3597 spin_unlock(&vmx_vpid_lock); 3598 } 3599 3600 static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, 3601 u32 msr, int type) 3602 { 3603 int f = sizeof(unsigned long); 3604 3605 if (!cpu_has_vmx_msr_bitmap()) 3606 return; 3607 3608 if (static_branch_unlikely(&enable_evmcs)) 3609 evmcs_touch_msr_bitmap(); 3610 3611 /* 3612 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals 3613 * have the write-low and read-high bitmap offsets the wrong way round. 3614 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
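 *
 * The 4KiB bitmap page is laid out as:
 *   0x000 - 0x3ff: read bitmap for MSRs 0x00000000 - 0x00001fff
 *   0x400 - 0x7ff: read bitmap for MSRs 0xc0000000 - 0xc0001fff
 *   0x800 - 0xbff: write bitmap for MSRs 0x00000000 - 0x00001fff
 *   0xc00 - 0xfff: write bitmap for MSRs 0xc0000000 - 0xc0001fff
 * A clear bit lets the access through without a VM exit; a set bit
 * forces an exit.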
3615 */ 3616 if (msr <= 0x1fff) { 3617 if (type & MSR_TYPE_R) 3618 /* read-low */ 3619 __clear_bit(msr, msr_bitmap + 0x000 / f); 3620 3621 if (type & MSR_TYPE_W) 3622 /* write-low */ 3623 __clear_bit(msr, msr_bitmap + 0x800 / f); 3624 3625 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 3626 msr &= 0x1fff; 3627 if (type & MSR_TYPE_R) 3628 /* read-high */ 3629 __clear_bit(msr, msr_bitmap + 0x400 / f); 3630 3631 if (type & MSR_TYPE_W) 3632 /* write-high */ 3633 __clear_bit(msr, msr_bitmap + 0xc00 / f); 3634 3635 } 3636 } 3637 3638 static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, 3639 u32 msr, int type) 3640 { 3641 int f = sizeof(unsigned long); 3642 3643 if (!cpu_has_vmx_msr_bitmap()) 3644 return; 3645 3646 if (static_branch_unlikely(&enable_evmcs)) 3647 evmcs_touch_msr_bitmap(); 3648 3649 /* 3650 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals 3651 * have the write-low and read-high bitmap offsets the wrong way round. 3652 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 3653 */ 3654 if (msr <= 0x1fff) { 3655 if (type & MSR_TYPE_R) 3656 /* read-low */ 3657 __set_bit(msr, msr_bitmap + 0x000 / f); 3658 3659 if (type & MSR_TYPE_W) 3660 /* write-low */ 3661 __set_bit(msr, msr_bitmap + 0x800 / f); 3662 3663 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 3664 msr &= 0x1fff; 3665 if (type & MSR_TYPE_R) 3666 /* read-high */ 3667 __set_bit(msr, msr_bitmap + 0x400 / f); 3668 3669 if (type & MSR_TYPE_W) 3670 /* write-high */ 3671 __set_bit(msr, msr_bitmap + 0xc00 / f); 3672 3673 } 3674 } 3675 3676 static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap, 3677 u32 msr, int type, bool value) 3678 { 3679 if (value) 3680 vmx_enable_intercept_for_msr(msr_bitmap, msr, type); 3681 else 3682 vmx_disable_intercept_for_msr(msr_bitmap, msr, type); 3683 } 3684 3685 static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu) 3686 { 3687 u8 mode = 0; 3688 3689 if (cpu_has_secondary_exec_ctrls() && 3690 (secondary_exec_controls_get(to_vmx(vcpu)) & 3691 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { 3692 mode |= MSR_BITMAP_MODE_X2APIC; 3693 if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) 3694 mode |= MSR_BITMAP_MODE_X2APIC_APICV; 3695 } 3696 3697 return mode; 3698 } 3699 3700 static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap, 3701 u8 mode) 3702 { 3703 int msr; 3704 3705 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 3706 unsigned word = msr / BITS_PER_LONG; 3707 msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0; 3708 msr_bitmap[word + (0x800 / sizeof(long))] = ~0; 3709 } 3710 3711 if (mode & MSR_BITMAP_MODE_X2APIC) { 3712 /* 3713 * TPR reads and writes can be virtualized even if virtual interrupt 3714 * delivery is not in use. 
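 * When APICv is active, EOI and SELF_IPI writes are also passed
 * through below, while TMCCT (current-count) reads are re-intercepted
 * so that KVM can emulate the APIC timer.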
3715 */ 3716 vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW); 3717 if (mode & MSR_BITMAP_MODE_X2APIC_APICV) { 3718 vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R); 3719 vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W); 3720 vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W); 3721 } 3722 } 3723 } 3724 3725 void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu) 3726 { 3727 struct vcpu_vmx *vmx = to_vmx(vcpu); 3728 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; 3729 u8 mode = vmx_msr_bitmap_mode(vcpu); 3730 u8 changed = mode ^ vmx->msr_bitmap_mode; 3731 3732 if (!changed) 3733 return; 3734 3735 if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV)) 3736 vmx_update_msr_bitmap_x2apic(msr_bitmap, mode); 3737 3738 vmx->msr_bitmap_mode = mode; 3739 } 3740 3741 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx) 3742 { 3743 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; 3744 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); 3745 u32 i; 3746 3747 vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_STATUS, 3748 MSR_TYPE_RW, flag); 3749 vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_BASE, 3750 MSR_TYPE_RW, flag); 3751 vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_MASK, 3752 MSR_TYPE_RW, flag); 3753 vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_CR3_MATCH, 3754 MSR_TYPE_RW, flag); 3755 for (i = 0; i < vmx->pt_desc.addr_range; i++) { 3756 vmx_set_intercept_for_msr(msr_bitmap, 3757 MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag); 3758 vmx_set_intercept_for_msr(msr_bitmap, 3759 MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag); 3760 } 3761 } 3762 3763 static bool vmx_get_enable_apicv(struct kvm *kvm) 3764 { 3765 return enable_apicv; 3766 } 3767 3768 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) 3769 { 3770 struct vcpu_vmx *vmx = to_vmx(vcpu); 3771 void *vapic_page; 3772 u32 vppr; 3773 int rvi; 3774 3775 if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || 3776 !nested_cpu_has_vid(get_vmcs12(vcpu)) || 3777 WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn)) 3778 return false; 3779 3780 rvi = vmx_get_rvi(); 3781 3782 vapic_page = vmx->nested.virtual_apic_map.hva; 3783 vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); 3784 3785 return ((rvi & 0xf0) > (vppr & 0xf0)); 3786 } 3787 3788 static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, 3789 bool nested) 3790 { 3791 #ifdef CONFIG_SMP 3792 int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; 3793 3794 if (vcpu->mode == IN_GUEST_MODE) { 3795 /* 3796 * The vector of interrupt to be delivered to vcpu had 3797 * been set in PIR before this function. 3798 * 3799 * Following cases will be reached in this block, and 3800 * we always send a notification event in all cases as 3801 * explained below. 3802 * 3803 * Case 1: vcpu keeps in non-root mode. Sending a 3804 * notification event posts the interrupt to vcpu. 3805 * 3806 * Case 2: vcpu exits to root mode and is still 3807 * runnable. PIR will be synced to vIRR before the 3808 * next vcpu entry. Sending a notification event in 3809 * this case has no effect, as vcpu is not in root 3810 * mode. 3811 * 3812 * Case 3: vcpu exits to root mode and is blocked. 3813 * vcpu_block() has already synced PIR to vIRR and 3814 * never blocks vcpu if vIRR is not cleared. 
Therefore, 3815 * a blocked vcpu here does not wait for any requested 3816 * interrupts in PIR, and sending a notification event 3817 * which has no effect is safe here. 3818 */ 3819 3820 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); 3821 return true; 3822 } 3823 #endif 3824 return false; 3825 } 3826 3827 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, 3828 int vector) 3829 { 3830 struct vcpu_vmx *vmx = to_vmx(vcpu); 3831 3832 if (is_guest_mode(vcpu) && 3833 vector == vmx->nested.posted_intr_nv) { 3834 /* 3835 * If a posted intr is not recognized by hardware, 3836 * we will accomplish it in the next vmentry. 3837 */ 3838 vmx->nested.pi_pending = true; 3839 kvm_make_request(KVM_REQ_EVENT, vcpu); 3840 /* the PIR and ON have been set by L1. */ 3841 if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) 3842 kvm_vcpu_kick(vcpu); 3843 return 0; 3844 } 3845 return -1; 3846 } 3847 /* 3848 * Send interrupt to vcpu via posted interrupt way. 3849 * 1. If target vcpu is running(non-root mode), send posted interrupt 3850 * notification to vcpu and hardware will sync PIR to vIRR atomically. 3851 * 2. If target vcpu isn't running(root mode), kick it to pick up the 3852 * interrupt from PIR in next vmentry. 3853 */ 3854 static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) 3855 { 3856 struct vcpu_vmx *vmx = to_vmx(vcpu); 3857 int r; 3858 3859 r = vmx_deliver_nested_posted_interrupt(vcpu, vector); 3860 if (!r) 3861 return; 3862 3863 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) 3864 return; 3865 3866 /* If a previous notification has sent the IPI, nothing to do. */ 3867 if (pi_test_and_set_on(&vmx->pi_desc)) 3868 return; 3869 3870 if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) 3871 kvm_vcpu_kick(vcpu); 3872 } 3873 3874 /* 3875 * Set up the vmcs's constant host-state fields, i.e., host-state fields that 3876 * will not change in the lifetime of the guest. 3877 * Note that host-state that does change is set elsewhere. E.g., host-state 3878 * that is set differently for each CPU is set in vmx_vcpu_load(), not here. 3879 */ 3880 void vmx_set_constant_host_state(struct vcpu_vmx *vmx) 3881 { 3882 u32 low32, high32; 3883 unsigned long tmpl; 3884 unsigned long cr0, cr3, cr4; 3885 3886 cr0 = read_cr0(); 3887 WARN_ON(cr0 & X86_CR0_TS); 3888 vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */ 3889 3890 /* 3891 * Save the most likely value for this task's CR3 in the VMCS. 3892 * We can't use __get_current_cr3_fast() because we're not atomic. 3893 */ 3894 cr3 = __read_cr3(); 3895 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ 3896 vmx->loaded_vmcs->host_state.cr3 = cr3; 3897 3898 /* Save the most likely value for this task's CR4 in the VMCS. */ 3899 cr4 = cr4_read_shadow(); 3900 vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ 3901 vmx->loaded_vmcs->host_state.cr4 = cr4; 3902 3903 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ 3904 #ifdef CONFIG_X86_64 3905 /* 3906 * Load null selectors, so we can avoid reloading them in 3907 * vmx_prepare_switch_to_host(), in case userspace uses 3908 * the null selectors too (the expected case). 
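	 * If userspace does run with non-null DS/ES, vmx_prepare_switch_to_host()
	 * notices the saved selectors and reloads them when host state is
	 * restored, so correctness does not depend on this assumption.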
3909 */ 3910 vmcs_write16(HOST_DS_SELECTOR, 0); 3911 vmcs_write16(HOST_ES_SELECTOR, 0); 3912 #else 3913 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 3914 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 3915 #endif 3916 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 3917 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ 3918 3919 vmcs_writel(HOST_IDTR_BASE, host_idt_base); /* 22.2.4 */ 3920 3921 vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */ 3922 3923 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); 3924 vmcs_write32(HOST_IA32_SYSENTER_CS, low32); 3925 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); 3926 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ 3927 3928 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { 3929 rdmsr(MSR_IA32_CR_PAT, low32, high32); 3930 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); 3931 } 3932 3933 if (cpu_has_load_ia32_efer()) 3934 vmcs_write64(HOST_IA32_EFER, host_efer); 3935 } 3936 3937 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) 3938 { 3939 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; 3940 if (enable_ept) 3941 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; 3942 if (is_guest_mode(&vmx->vcpu)) 3943 vmx->vcpu.arch.cr4_guest_owned_bits &= 3944 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; 3945 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); 3946 } 3947 3948 u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) 3949 { 3950 u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; 3951 3952 if (!kvm_vcpu_apicv_active(&vmx->vcpu)) 3953 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; 3954 3955 if (!enable_vnmi) 3956 pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS; 3957 3958 if (!enable_preemption_timer) 3959 pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 3960 3961 return pin_based_exec_ctrl; 3962 } 3963 3964 static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) 3965 { 3966 struct vcpu_vmx *vmx = to_vmx(vcpu); 3967 3968 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); 3969 if (cpu_has_secondary_exec_ctrls()) { 3970 if (kvm_vcpu_apicv_active(vcpu)) 3971 secondary_exec_controls_setbit(vmx, 3972 SECONDARY_EXEC_APIC_REGISTER_VIRT | 3973 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 3974 else 3975 secondary_exec_controls_clearbit(vmx, 3976 SECONDARY_EXEC_APIC_REGISTER_VIRT | 3977 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 3978 } 3979 3980 if (cpu_has_vmx_msr_bitmap()) 3981 vmx_update_msr_bitmap(vcpu); 3982 } 3983 3984 u32 vmx_exec_control(struct vcpu_vmx *vmx) 3985 { 3986 u32 exec_control = vmcs_config.cpu_based_exec_ctrl; 3987 3988 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) 3989 exec_control &= ~CPU_BASED_MOV_DR_EXITING; 3990 3991 if (!cpu_need_tpr_shadow(&vmx->vcpu)) { 3992 exec_control &= ~CPU_BASED_TPR_SHADOW; 3993 #ifdef CONFIG_X86_64 3994 exec_control |= CPU_BASED_CR8_STORE_EXITING | 3995 CPU_BASED_CR8_LOAD_EXITING; 3996 #endif 3997 } 3998 if (!enable_ept) 3999 exec_control |= CPU_BASED_CR3_STORE_EXITING | 4000 CPU_BASED_CR3_LOAD_EXITING | 4001 CPU_BASED_INVLPG_EXITING; 4002 if (kvm_mwait_in_guest(vmx->vcpu.kvm)) 4003 exec_control &= ~(CPU_BASED_MWAIT_EXITING | 4004 CPU_BASED_MONITOR_EXITING); 4005 if (kvm_hlt_in_guest(vmx->vcpu.kvm)) 4006 exec_control &= ~CPU_BASED_HLT_EXITING; 4007 return exec_control; 4008 } 4009 4010 4011 static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) 4012 { 4013 struct kvm_vcpu *vcpu = &vmx->vcpu; 4014 4015 u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; 4016 4017 if 
(pt_mode == PT_MODE_SYSTEM) 4018 exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX); 4019 if (!cpu_need_virtualize_apic_accesses(vcpu)) 4020 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 4021 if (vmx->vpid == 0) 4022 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; 4023 if (!enable_ept) { 4024 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; 4025 enable_unrestricted_guest = 0; 4026 } 4027 if (!enable_unrestricted_guest) 4028 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 4029 if (kvm_pause_in_guest(vmx->vcpu.kvm)) 4030 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; 4031 if (!kvm_vcpu_apicv_active(vcpu)) 4032 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | 4033 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 4034 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 4035 4036 /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP, 4037 * in vmx_set_cr4. */ 4038 exec_control &= ~SECONDARY_EXEC_DESC; 4039 4040 /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD 4041 (handle_vmptrld). 4042 We can NOT enable shadow_vmcs here because we don't have yet 4043 a current VMCS12 4044 */ 4045 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 4046 4047 if (!enable_pml) 4048 exec_control &= ~SECONDARY_EXEC_ENABLE_PML; 4049 4050 if (vmx_xsaves_supported()) { 4051 /* Exposing XSAVES only when XSAVE is exposed */ 4052 bool xsaves_enabled = 4053 guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && 4054 guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); 4055 4056 vcpu->arch.xsaves_enabled = xsaves_enabled; 4057 4058 if (!xsaves_enabled) 4059 exec_control &= ~SECONDARY_EXEC_XSAVES; 4060 4061 if (nested) { 4062 if (xsaves_enabled) 4063 vmx->nested.msrs.secondary_ctls_high |= 4064 SECONDARY_EXEC_XSAVES; 4065 else 4066 vmx->nested.msrs.secondary_ctls_high &= 4067 ~SECONDARY_EXEC_XSAVES; 4068 } 4069 } 4070 4071 if (vmx_rdtscp_supported()) { 4072 bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP); 4073 if (!rdtscp_enabled) 4074 exec_control &= ~SECONDARY_EXEC_RDTSCP; 4075 4076 if (nested) { 4077 if (rdtscp_enabled) 4078 vmx->nested.msrs.secondary_ctls_high |= 4079 SECONDARY_EXEC_RDTSCP; 4080 else 4081 vmx->nested.msrs.secondary_ctls_high &= 4082 ~SECONDARY_EXEC_RDTSCP; 4083 } 4084 } 4085 4086 if (vmx_invpcid_supported()) { 4087 /* Exposing INVPCID only when PCID is exposed */ 4088 bool invpcid_enabled = 4089 guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) && 4090 guest_cpuid_has(vcpu, X86_FEATURE_PCID); 4091 4092 if (!invpcid_enabled) { 4093 exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; 4094 guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID); 4095 } 4096 4097 if (nested) { 4098 if (invpcid_enabled) 4099 vmx->nested.msrs.secondary_ctls_high |= 4100 SECONDARY_EXEC_ENABLE_INVPCID; 4101 else 4102 vmx->nested.msrs.secondary_ctls_high &= 4103 ~SECONDARY_EXEC_ENABLE_INVPCID; 4104 } 4105 } 4106 4107 if (vmx_rdrand_supported()) { 4108 bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND); 4109 if (rdrand_enabled) 4110 exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING; 4111 4112 if (nested) { 4113 if (rdrand_enabled) 4114 vmx->nested.msrs.secondary_ctls_high |= 4115 SECONDARY_EXEC_RDRAND_EXITING; 4116 else 4117 vmx->nested.msrs.secondary_ctls_high &= 4118 ~SECONDARY_EXEC_RDRAND_EXITING; 4119 } 4120 } 4121 4122 if (vmx_rdseed_supported()) { 4123 bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED); 4124 if (rdseed_enabled) 4125 exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING; 4126 4127 if (nested) { 4128 if (rdseed_enabled) 4129 vmx->nested.msrs.secondary_ctls_high 
|= 4130 SECONDARY_EXEC_RDSEED_EXITING; 4131 else 4132 vmx->nested.msrs.secondary_ctls_high &= 4133 ~SECONDARY_EXEC_RDSEED_EXITING; 4134 } 4135 } 4136 4137 if (vmx_waitpkg_supported()) { 4138 bool waitpkg_enabled = 4139 guest_cpuid_has(vcpu, X86_FEATURE_WAITPKG); 4140 4141 if (!waitpkg_enabled) 4142 exec_control &= ~SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; 4143 4144 if (nested) { 4145 if (waitpkg_enabled) 4146 vmx->nested.msrs.secondary_ctls_high |= 4147 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; 4148 else 4149 vmx->nested.msrs.secondary_ctls_high &= 4150 ~SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; 4151 } 4152 } 4153 4154 vmx->secondary_exec_control = exec_control; 4155 } 4156 4157 static void ept_set_mmio_spte_mask(void) 4158 { 4159 /* 4160 * EPT Misconfigurations can be generated if the value of bits 2:0 4161 * of an EPT paging-structure entry is 110b (write/execute). 4162 */ 4163 kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK, 4164 VMX_EPT_MISCONFIG_WX_VALUE, 0); 4165 } 4166 4167 #define VMX_XSS_EXIT_BITMAP 0 4168 4169 /* 4170 * Noting that the initialization of Guest-state Area of VMCS is in 4171 * vmx_vcpu_reset(). 4172 */ 4173 static void init_vmcs(struct vcpu_vmx *vmx) 4174 { 4175 if (nested) 4176 nested_vmx_set_vmcs_shadowing_bitmap(); 4177 4178 if (cpu_has_vmx_msr_bitmap()) 4179 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); 4180 4181 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ 4182 4183 /* Control */ 4184 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); 4185 4186 exec_controls_set(vmx, vmx_exec_control(vmx)); 4187 4188 if (cpu_has_secondary_exec_ctrls()) { 4189 vmx_compute_secondary_exec_control(vmx); 4190 secondary_exec_controls_set(vmx, vmx->secondary_exec_control); 4191 } 4192 4193 if (kvm_vcpu_apicv_active(&vmx->vcpu)) { 4194 vmcs_write64(EOI_EXIT_BITMAP0, 0); 4195 vmcs_write64(EOI_EXIT_BITMAP1, 0); 4196 vmcs_write64(EOI_EXIT_BITMAP2, 0); 4197 vmcs_write64(EOI_EXIT_BITMAP3, 0); 4198 4199 vmcs_write16(GUEST_INTR_STATUS, 0); 4200 4201 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); 4202 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); 4203 } 4204 4205 if (!kvm_pause_in_guest(vmx->vcpu.kvm)) { 4206 vmcs_write32(PLE_GAP, ple_gap); 4207 vmx->ple_window = ple_window; 4208 vmx->ple_window_dirty = true; 4209 } 4210 4211 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 4212 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 4213 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ 4214 4215 vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ 4216 vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ 4217 vmx_set_constant_host_state(vmx); 4218 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ 4219 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ 4220 4221 if (cpu_has_vmx_vmfunc()) 4222 vmcs_write64(VM_FUNCTION_CONTROL, 0); 4223 4224 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); 4225 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 4226 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 4227 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 4228 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 4229 4230 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) 4231 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 4232 4233 vm_exit_controls_set(vmx, vmx_vmexit_ctrl()); 4234 4235 /* 22.2.1, 20.8.1 */ 4236 vm_entry_controls_set(vmx, vmx_vmentry_ctrl()); 4237 4238 vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS; 4239 vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS); 4240 4241 set_cr4_guest_host_mask(vmx); 4242 4243 if (vmx->vpid != 0) 4244 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 
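	/*
	 * Remaining optional state: the XSS exit bitmap, PML, ENCLS exiting
	 * and Processor Trace are configured below only when the corresponding
	 * support is present.
	 */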
4245 4246 if (vmx_xsaves_supported()) 4247 vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); 4248 4249 if (enable_pml) { 4250 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); 4251 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 4252 } 4253 4254 if (cpu_has_vmx_encls_vmexit()) 4255 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); 4256 4257 if (pt_mode == PT_MODE_HOST_GUEST) { 4258 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); 4259 /* Bit[6~0] are forced to 1, writes are ignored. */ 4260 vmx->pt_desc.guest.output_mask = 0x7F; 4261 vmcs_write64(GUEST_IA32_RTIT_CTL, 0); 4262 } 4263 } 4264 4265 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 4266 { 4267 struct vcpu_vmx *vmx = to_vmx(vcpu); 4268 struct msr_data apic_base_msr; 4269 u64 cr0; 4270 4271 vmx->rmode.vm86_active = 0; 4272 vmx->spec_ctrl = 0; 4273 4274 vmx->msr_ia32_umwait_control = 0; 4275 4276 vcpu->arch.microcode_version = 0x100000000ULL; 4277 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); 4278 vmx->hv_deadline_tsc = -1; 4279 kvm_set_cr8(vcpu, 0); 4280 4281 if (!init_event) { 4282 apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | 4283 MSR_IA32_APICBASE_ENABLE; 4284 if (kvm_vcpu_is_reset_bsp(vcpu)) 4285 apic_base_msr.data |= MSR_IA32_APICBASE_BSP; 4286 apic_base_msr.host_initiated = true; 4287 kvm_set_apic_base(vcpu, &apic_base_msr); 4288 } 4289 4290 vmx_segment_cache_clear(vmx); 4291 4292 seg_setup(VCPU_SREG_CS); 4293 vmcs_write16(GUEST_CS_SELECTOR, 0xf000); 4294 vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); 4295 4296 seg_setup(VCPU_SREG_DS); 4297 seg_setup(VCPU_SREG_ES); 4298 seg_setup(VCPU_SREG_FS); 4299 seg_setup(VCPU_SREG_GS); 4300 seg_setup(VCPU_SREG_SS); 4301 4302 vmcs_write16(GUEST_TR_SELECTOR, 0); 4303 vmcs_writel(GUEST_TR_BASE, 0); 4304 vmcs_write32(GUEST_TR_LIMIT, 0xffff); 4305 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 4306 4307 vmcs_write16(GUEST_LDTR_SELECTOR, 0); 4308 vmcs_writel(GUEST_LDTR_BASE, 0); 4309 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); 4310 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); 4311 4312 if (!init_event) { 4313 vmcs_write32(GUEST_SYSENTER_CS, 0); 4314 vmcs_writel(GUEST_SYSENTER_ESP, 0); 4315 vmcs_writel(GUEST_SYSENTER_EIP, 0); 4316 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4317 } 4318 4319 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 4320 kvm_rip_write(vcpu, 0xfff0); 4321 4322 vmcs_writel(GUEST_GDTR_BASE, 0); 4323 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); 4324 4325 vmcs_writel(GUEST_IDTR_BASE, 0); 4326 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); 4327 4328 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); 4329 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); 4330 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); 4331 if (kvm_mpx_supported()) 4332 vmcs_write64(GUEST_BNDCFGS, 0); 4333 4334 setup_msrs(vmx); 4335 4336 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ 4337 4338 if (cpu_has_vmx_tpr_shadow() && !init_event) { 4339 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); 4340 if (cpu_need_tpr_shadow(vcpu)) 4341 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 4342 __pa(vcpu->arch.apic->regs)); 4343 vmcs_write32(TPR_THRESHOLD, 0); 4344 } 4345 4346 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 4347 4348 cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; 4349 vmx->vcpu.arch.cr0 = cr0; 4350 vmx_set_cr0(vcpu, cr0); /* enter rmode */ 4351 vmx_set_cr4(vcpu, 0); 4352 vmx_set_efer(vcpu, 0); 4353 4354 update_exception_bitmap(vcpu); 4355 4356 vpid_sync_context(vmx->vpid); 4357 if (init_event) 4358 vmx_clear_hlt(vcpu); 4359 } 4360 4361 static void enable_irq_window(struct kvm_vcpu *vcpu) 4362 { 4363 
exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING); 4364 } 4365 4366 static void enable_nmi_window(struct kvm_vcpu *vcpu) 4367 { 4368 if (!enable_vnmi || 4369 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { 4370 enable_irq_window(vcpu); 4371 return; 4372 } 4373 4374 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING); 4375 } 4376 4377 static void vmx_inject_irq(struct kvm_vcpu *vcpu) 4378 { 4379 struct vcpu_vmx *vmx = to_vmx(vcpu); 4380 uint32_t intr; 4381 int irq = vcpu->arch.interrupt.nr; 4382 4383 trace_kvm_inj_virq(irq); 4384 4385 ++vcpu->stat.irq_injections; 4386 if (vmx->rmode.vm86_active) { 4387 int inc_eip = 0; 4388 if (vcpu->arch.interrupt.soft) 4389 inc_eip = vcpu->arch.event_exit_inst_len; 4390 kvm_inject_realmode_interrupt(vcpu, irq, inc_eip); 4391 return; 4392 } 4393 intr = irq | INTR_INFO_VALID_MASK; 4394 if (vcpu->arch.interrupt.soft) { 4395 intr |= INTR_TYPE_SOFT_INTR; 4396 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 4397 vmx->vcpu.arch.event_exit_inst_len); 4398 } else 4399 intr |= INTR_TYPE_EXT_INTR; 4400 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); 4401 4402 vmx_clear_hlt(vcpu); 4403 } 4404 4405 static void vmx_inject_nmi(struct kvm_vcpu *vcpu) 4406 { 4407 struct vcpu_vmx *vmx = to_vmx(vcpu); 4408 4409 if (!enable_vnmi) { 4410 /* 4411 * Tracking the NMI-blocked state in software is built upon 4412 * finding the next open IRQ window. This, in turn, depends on 4413 * well-behaving guests: They have to keep IRQs disabled at 4414 * least as long as the NMI handler runs. Otherwise we may 4415 * cause NMI nesting, maybe breaking the guest. But as this is 4416 * highly unlikely, we can live with the residual risk. 4417 */ 4418 vmx->loaded_vmcs->soft_vnmi_blocked = 1; 4419 vmx->loaded_vmcs->vnmi_blocked_time = 0; 4420 } 4421 4422 ++vcpu->stat.nmi_injections; 4423 vmx->loaded_vmcs->nmi_known_unmasked = false; 4424 4425 if (vmx->rmode.vm86_active) { 4426 kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0); 4427 return; 4428 } 4429 4430 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 4431 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); 4432 4433 vmx_clear_hlt(vcpu); 4434 } 4435 4436 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) 4437 { 4438 struct vcpu_vmx *vmx = to_vmx(vcpu); 4439 bool masked; 4440 4441 if (!enable_vnmi) 4442 return vmx->loaded_vmcs->soft_vnmi_blocked; 4443 if (vmx->loaded_vmcs->nmi_known_unmasked) 4444 return false; 4445 masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; 4446 vmx->loaded_vmcs->nmi_known_unmasked = !masked; 4447 return masked; 4448 } 4449 4450 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) 4451 { 4452 struct vcpu_vmx *vmx = to_vmx(vcpu); 4453 4454 if (!enable_vnmi) { 4455 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { 4456 vmx->loaded_vmcs->soft_vnmi_blocked = masked; 4457 vmx->loaded_vmcs->vnmi_blocked_time = 0; 4458 } 4459 } else { 4460 vmx->loaded_vmcs->nmi_known_unmasked = !masked; 4461 if (masked) 4462 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 4463 GUEST_INTR_STATE_NMI); 4464 else 4465 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, 4466 GUEST_INTR_STATE_NMI); 4467 } 4468 } 4469 4470 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) 4471 { 4472 if (to_vmx(vcpu)->nested.nested_run_pending) 4473 return 0; 4474 4475 if (!enable_vnmi && 4476 to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) 4477 return 0; 4478 4479 return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 4480 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI 4481 | GUEST_INTR_STATE_NMI)); 4482 } 4483 4484 static 
int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) 4485 { 4486 return (!to_vmx(vcpu)->nested.nested_run_pending && 4487 vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && 4488 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 4489 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); 4490 } 4491 4492 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) 4493 { 4494 int ret; 4495 4496 if (enable_unrestricted_guest) 4497 return 0; 4498 4499 ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, 4500 PAGE_SIZE * 3); 4501 if (ret) 4502 return ret; 4503 to_kvm_vmx(kvm)->tss_addr = addr; 4504 return init_rmode_tss(kvm); 4505 } 4506 4507 static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) 4508 { 4509 to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr; 4510 return 0; 4511 } 4512 4513 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) 4514 { 4515 switch (vec) { 4516 case BP_VECTOR: 4517 /* 4518 * Update instruction length as we may reinject the exception 4519 * from user space while in guest debugging mode. 4520 */ 4521 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = 4522 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4523 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 4524 return false; 4525 /* fall through */ 4526 case DB_VECTOR: 4527 if (vcpu->guest_debug & 4528 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 4529 return false; 4530 /* fall through */ 4531 case DE_VECTOR: 4532 case OF_VECTOR: 4533 case BR_VECTOR: 4534 case UD_VECTOR: 4535 case DF_VECTOR: 4536 case SS_VECTOR: 4537 case GP_VECTOR: 4538 case MF_VECTOR: 4539 return true; 4540 break; 4541 } 4542 return false; 4543 } 4544 4545 static int handle_rmode_exception(struct kvm_vcpu *vcpu, 4546 int vec, u32 err_code) 4547 { 4548 /* 4549 * Instruction with address size override prefix opcode 0x67 4550 * Cause the #SS fault with 0 error code in VM86 mode. 4551 */ 4552 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { 4553 if (kvm_emulate_instruction(vcpu, 0)) { 4554 if (vcpu->arch.halt_request) { 4555 vcpu->arch.halt_request = 0; 4556 return kvm_vcpu_halt(vcpu); 4557 } 4558 return 1; 4559 } 4560 return 0; 4561 } 4562 4563 /* 4564 * Forward all other exceptions that are valid in real mode. 4565 * FIXME: Breaks guest debugging in real mode, needs to be fixed with 4566 * the required debugging infrastructure rework. 4567 */ 4568 kvm_queue_exception(vcpu, vec); 4569 return 1; 4570 } 4571 4572 /* 4573 * Trigger machine check on the host. We assume all the MSRs are already set up 4574 * by the CPU and that we still run on the same CPU as the MCE occurred on. 4575 * We pass a fake environment to the machine check handler because we want 4576 * the guest to be always treated like user space, no matter what context 4577 * it used internally. 
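 * Treating the event as if it hit user space lets do_machine_check() attempt
 * recovery (e.g. killing the offending task) rather than treating it as a
 * fatal kernel-context #MC.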
4578 */ 4579 static void kvm_machine_check(void) 4580 { 4581 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) 4582 struct pt_regs regs = { 4583 .cs = 3, /* Fake ring 3 no matter what the guest ran on */ 4584 .flags = X86_EFLAGS_IF, 4585 }; 4586 4587 do_machine_check(®s, 0); 4588 #endif 4589 } 4590 4591 static int handle_machine_check(struct kvm_vcpu *vcpu) 4592 { 4593 /* handled by vmx_vcpu_run() */ 4594 return 1; 4595 } 4596 4597 static int handle_exception_nmi(struct kvm_vcpu *vcpu) 4598 { 4599 struct vcpu_vmx *vmx = to_vmx(vcpu); 4600 struct kvm_run *kvm_run = vcpu->run; 4601 u32 intr_info, ex_no, error_code; 4602 unsigned long cr2, rip, dr6; 4603 u32 vect_info; 4604 4605 vect_info = vmx->idt_vectoring_info; 4606 intr_info = vmx->exit_intr_info; 4607 4608 if (is_machine_check(intr_info) || is_nmi(intr_info)) 4609 return 1; /* handled by handle_exception_nmi_irqoff() */ 4610 4611 if (is_invalid_opcode(intr_info)) 4612 return handle_ud(vcpu); 4613 4614 error_code = 0; 4615 if (intr_info & INTR_INFO_DELIVER_CODE_MASK) 4616 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 4617 4618 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { 4619 WARN_ON_ONCE(!enable_vmware_backdoor); 4620 4621 /* 4622 * VMware backdoor emulation on #GP interception only handles 4623 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero 4624 * error code on #GP. 4625 */ 4626 if (error_code) { 4627 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); 4628 return 1; 4629 } 4630 return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP); 4631 } 4632 4633 /* 4634 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing 4635 * MMIO, it is better to report an internal error. 4636 * See the comments in vmx_handle_exit. 4637 */ 4638 if ((vect_info & VECTORING_INFO_VALID_MASK) && 4639 !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { 4640 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 4641 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; 4642 vcpu->run->internal.ndata = 3; 4643 vcpu->run->internal.data[0] = vect_info; 4644 vcpu->run->internal.data[1] = intr_info; 4645 vcpu->run->internal.data[2] = error_code; 4646 return 0; 4647 } 4648 4649 if (is_page_fault(intr_info)) { 4650 cr2 = vmcs_readl(EXIT_QUALIFICATION); 4651 /* EPT won't cause page fault directly */ 4652 WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept); 4653 return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0); 4654 } 4655 4656 ex_no = intr_info & INTR_INFO_VECTOR_MASK; 4657 4658 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) 4659 return handle_rmode_exception(vcpu, ex_no, error_code); 4660 4661 switch (ex_no) { 4662 case AC_VECTOR: 4663 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); 4664 return 1; 4665 case DB_VECTOR: 4666 dr6 = vmcs_readl(EXIT_QUALIFICATION); 4667 if (!(vcpu->guest_debug & 4668 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { 4669 vcpu->arch.dr6 &= ~DR_TRAP_BITS; 4670 vcpu->arch.dr6 |= dr6 | DR6_RTM; 4671 if (is_icebp(intr_info)) 4672 WARN_ON(!skip_emulated_instruction(vcpu)); 4673 4674 kvm_queue_exception(vcpu, DB_VECTOR); 4675 return 1; 4676 } 4677 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; 4678 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); 4679 /* fall through */ 4680 case BP_VECTOR: 4681 /* 4682 * Update instruction length as we may reinject #BP from 4683 * user space while in guest debugging mode. Reading it for 4684 * #DB as well causes no harm, it is not used in that case. 
4685 */ 4686 vmx->vcpu.arch.event_exit_inst_len = 4687 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4688 kvm_run->exit_reason = KVM_EXIT_DEBUG; 4689 rip = kvm_rip_read(vcpu); 4690 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; 4691 kvm_run->debug.arch.exception = ex_no; 4692 break; 4693 default: 4694 kvm_run->exit_reason = KVM_EXIT_EXCEPTION; 4695 kvm_run->ex.exception = ex_no; 4696 kvm_run->ex.error_code = error_code; 4697 break; 4698 } 4699 return 0; 4700 } 4701 4702 static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu) 4703 { 4704 ++vcpu->stat.irq_exits; 4705 return 1; 4706 } 4707 4708 static int handle_triple_fault(struct kvm_vcpu *vcpu) 4709 { 4710 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 4711 vcpu->mmio_needed = 0; 4712 return 0; 4713 } 4714 4715 static int handle_io(struct kvm_vcpu *vcpu) 4716 { 4717 unsigned long exit_qualification; 4718 int size, in, string; 4719 unsigned port; 4720 4721 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4722 string = (exit_qualification & 16) != 0; 4723 4724 ++vcpu->stat.io_exits; 4725 4726 if (string) 4727 return kvm_emulate_instruction(vcpu, 0); 4728 4729 port = exit_qualification >> 16; 4730 size = (exit_qualification & 7) + 1; 4731 in = (exit_qualification & 8) != 0; 4732 4733 return kvm_fast_pio(vcpu, size, port, in); 4734 } 4735 4736 static void 4737 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) 4738 { 4739 /* 4740 * Patch in the VMCALL instruction: 4741 */ 4742 hypercall[0] = 0x0f; 4743 hypercall[1] = 0x01; 4744 hypercall[2] = 0xc1; 4745 } 4746 4747 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */ 4748 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) 4749 { 4750 if (is_guest_mode(vcpu)) { 4751 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4752 unsigned long orig_val = val; 4753 4754 /* 4755 * We get here when L2 changed cr0 in a way that did not change 4756 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), 4757 * but did change L0 shadowed bits. So we first calculate the 4758 * effective cr0 value that L1 would like to write into the 4759 * hardware. It consists of the L2-owned bits from the new 4760 * value combined with the L1-owned bits from L1's guest_cr0. 
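		 * The unmodified value is still written to CR0_READ_SHADOW
		 * below, so L2 reads back exactly what it wrote.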
4761 */ 4762 val = (val & ~vmcs12->cr0_guest_host_mask) | 4763 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); 4764 4765 if (!nested_guest_cr0_valid(vcpu, val)) 4766 return 1; 4767 4768 if (kvm_set_cr0(vcpu, val)) 4769 return 1; 4770 vmcs_writel(CR0_READ_SHADOW, orig_val); 4771 return 0; 4772 } else { 4773 if (to_vmx(vcpu)->nested.vmxon && 4774 !nested_host_cr0_valid(vcpu, val)) 4775 return 1; 4776 4777 return kvm_set_cr0(vcpu, val); 4778 } 4779 } 4780 4781 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) 4782 { 4783 if (is_guest_mode(vcpu)) { 4784 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4785 unsigned long orig_val = val; 4786 4787 /* analogously to handle_set_cr0 */ 4788 val = (val & ~vmcs12->cr4_guest_host_mask) | 4789 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); 4790 if (kvm_set_cr4(vcpu, val)) 4791 return 1; 4792 vmcs_writel(CR4_READ_SHADOW, orig_val); 4793 return 0; 4794 } else 4795 return kvm_set_cr4(vcpu, val); 4796 } 4797 4798 static int handle_desc(struct kvm_vcpu *vcpu) 4799 { 4800 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); 4801 return kvm_emulate_instruction(vcpu, 0); 4802 } 4803 4804 static int handle_cr(struct kvm_vcpu *vcpu) 4805 { 4806 unsigned long exit_qualification, val; 4807 int cr; 4808 int reg; 4809 int err; 4810 int ret; 4811 4812 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4813 cr = exit_qualification & 15; 4814 reg = (exit_qualification >> 8) & 15; 4815 switch ((exit_qualification >> 4) & 3) { 4816 case 0: /* mov to cr */ 4817 val = kvm_register_readl(vcpu, reg); 4818 trace_kvm_cr_write(cr, val); 4819 switch (cr) { 4820 case 0: 4821 err = handle_set_cr0(vcpu, val); 4822 return kvm_complete_insn_gp(vcpu, err); 4823 case 3: 4824 WARN_ON_ONCE(enable_unrestricted_guest); 4825 err = kvm_set_cr3(vcpu, val); 4826 return kvm_complete_insn_gp(vcpu, err); 4827 case 4: 4828 err = handle_set_cr4(vcpu, val); 4829 return kvm_complete_insn_gp(vcpu, err); 4830 case 8: { 4831 u8 cr8_prev = kvm_get_cr8(vcpu); 4832 u8 cr8 = (u8)val; 4833 err = kvm_set_cr8(vcpu, cr8); 4834 ret = kvm_complete_insn_gp(vcpu, err); 4835 if (lapic_in_kernel(vcpu)) 4836 return ret; 4837 if (cr8_prev <= cr8) 4838 return ret; 4839 /* 4840 * TODO: we might be squashing a 4841 * KVM_GUESTDBG_SINGLESTEP-triggered 4842 * KVM_EXIT_DEBUG here. 
4843 */ 4844 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; 4845 return 0; 4846 } 4847 } 4848 break; 4849 case 2: /* clts */ 4850 WARN_ONCE(1, "Guest should always own CR0.TS"); 4851 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); 4852 trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); 4853 return kvm_skip_emulated_instruction(vcpu); 4854 case 1: /*mov from cr*/ 4855 switch (cr) { 4856 case 3: 4857 WARN_ON_ONCE(enable_unrestricted_guest); 4858 val = kvm_read_cr3(vcpu); 4859 kvm_register_write(vcpu, reg, val); 4860 trace_kvm_cr_read(cr, val); 4861 return kvm_skip_emulated_instruction(vcpu); 4862 case 8: 4863 val = kvm_get_cr8(vcpu); 4864 kvm_register_write(vcpu, reg, val); 4865 trace_kvm_cr_read(cr, val); 4866 return kvm_skip_emulated_instruction(vcpu); 4867 } 4868 break; 4869 case 3: /* lmsw */ 4870 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 4871 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); 4872 kvm_lmsw(vcpu, val); 4873 4874 return kvm_skip_emulated_instruction(vcpu); 4875 default: 4876 break; 4877 } 4878 vcpu->run->exit_reason = 0; 4879 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", 4880 (int)(exit_qualification >> 4) & 3, cr); 4881 return 0; 4882 } 4883 4884 static int handle_dr(struct kvm_vcpu *vcpu) 4885 { 4886 unsigned long exit_qualification; 4887 int dr, dr7, reg; 4888 4889 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4890 dr = exit_qualification & DEBUG_REG_ACCESS_NUM; 4891 4892 /* First, if DR does not exist, trigger UD */ 4893 if (!kvm_require_dr(vcpu, dr)) 4894 return 1; 4895 4896 /* Do not handle if the CPL > 0, will trigger GP on re-entry */ 4897 if (!kvm_require_cpl(vcpu, 0)) 4898 return 1; 4899 dr7 = vmcs_readl(GUEST_DR7); 4900 if (dr7 & DR7_GD) { 4901 /* 4902 * As the vm-exit takes precedence over the debug trap, we 4903 * need to emulate the latter, either for the host or the 4904 * guest debugging itself. 4905 */ 4906 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 4907 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; 4908 vcpu->run->debug.arch.dr7 = dr7; 4909 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); 4910 vcpu->run->debug.arch.exception = DB_VECTOR; 4911 vcpu->run->exit_reason = KVM_EXIT_DEBUG; 4912 return 0; 4913 } else { 4914 vcpu->arch.dr6 &= ~DR_TRAP_BITS; 4915 vcpu->arch.dr6 |= DR6_BD | DR6_RTM; 4916 kvm_queue_exception(vcpu, DB_VECTOR); 4917 return 1; 4918 } 4919 } 4920 4921 if (vcpu->guest_debug == 0) { 4922 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); 4923 4924 /* 4925 * No more DR vmexits; force a reload of the debug registers 4926 * and reenter on this instruction. The next vmexit will 4927 * retrieve the full state of the debug registers. 
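		 * That retrieval happens in vmx_sync_dirty_debug_regs(), which
		 * reads DR0-DR3 and DR6 back from hardware and sets
		 * CPU_BASED_MOV_DR_EXITING again.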
4928 */ 4929 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; 4930 return 1; 4931 } 4932 4933 reg = DEBUG_REG_ACCESS_REG(exit_qualification); 4934 if (exit_qualification & TYPE_MOV_FROM_DR) { 4935 unsigned long val; 4936 4937 if (kvm_get_dr(vcpu, dr, &val)) 4938 return 1; 4939 kvm_register_write(vcpu, reg, val); 4940 } else 4941 if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) 4942 return 1; 4943 4944 return kvm_skip_emulated_instruction(vcpu); 4945 } 4946 4947 static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) 4948 { 4949 return vcpu->arch.dr6; 4950 } 4951 4952 static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) 4953 { 4954 } 4955 4956 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) 4957 { 4958 get_debugreg(vcpu->arch.db[0], 0); 4959 get_debugreg(vcpu->arch.db[1], 1); 4960 get_debugreg(vcpu->arch.db[2], 2); 4961 get_debugreg(vcpu->arch.db[3], 3); 4962 get_debugreg(vcpu->arch.dr6, 6); 4963 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); 4964 4965 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; 4966 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); 4967 } 4968 4969 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) 4970 { 4971 vmcs_writel(GUEST_DR7, val); 4972 } 4973 4974 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) 4975 { 4976 kvm_apic_update_ppr(vcpu); 4977 return 1; 4978 } 4979 4980 static int handle_interrupt_window(struct kvm_vcpu *vcpu) 4981 { 4982 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING); 4983 4984 kvm_make_request(KVM_REQ_EVENT, vcpu); 4985 4986 ++vcpu->stat.irq_window_exits; 4987 return 1; 4988 } 4989 4990 static int handle_vmcall(struct kvm_vcpu *vcpu) 4991 { 4992 return kvm_emulate_hypercall(vcpu); 4993 } 4994 4995 static int handle_invd(struct kvm_vcpu *vcpu) 4996 { 4997 return kvm_emulate_instruction(vcpu, 0); 4998 } 4999 5000 static int handle_invlpg(struct kvm_vcpu *vcpu) 5001 { 5002 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5003 5004 kvm_mmu_invlpg(vcpu, exit_qualification); 5005 return kvm_skip_emulated_instruction(vcpu); 5006 } 5007 5008 static int handle_rdpmc(struct kvm_vcpu *vcpu) 5009 { 5010 int err; 5011 5012 err = kvm_rdpmc(vcpu); 5013 return kvm_complete_insn_gp(vcpu, err); 5014 } 5015 5016 static int handle_wbinvd(struct kvm_vcpu *vcpu) 5017 { 5018 return kvm_emulate_wbinvd(vcpu); 5019 } 5020 5021 static int handle_xsetbv(struct kvm_vcpu *vcpu) 5022 { 5023 u64 new_bv = kvm_read_edx_eax(vcpu); 5024 u32 index = kvm_rcx_read(vcpu); 5025 5026 if (kvm_set_xcr(vcpu, index, new_bv) == 0) 5027 return kvm_skip_emulated_instruction(vcpu); 5028 return 1; 5029 } 5030 5031 static int handle_apic_access(struct kvm_vcpu *vcpu) 5032 { 5033 if (likely(fasteoi)) { 5034 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5035 int access_type, offset; 5036 5037 access_type = exit_qualification & APIC_ACCESS_TYPE; 5038 offset = exit_qualification & APIC_ACCESS_OFFSET; 5039 /* 5040 * Sane guest uses MOV to write EOI, with written value 5041 * not cared. So make a short-circuit here by avoiding 5042 * heavy instruction emulation. 
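		 * The short-circuit below calls kvm_lapic_set_eoi() directly
		 * and simply skips the faulting MOV, since the written value
		 * is irrelevant for an EOI.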
5043 */ 5044 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && 5045 (offset == APIC_EOI)) { 5046 kvm_lapic_set_eoi(vcpu); 5047 return kvm_skip_emulated_instruction(vcpu); 5048 } 5049 } 5050 return kvm_emulate_instruction(vcpu, 0); 5051 } 5052 5053 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) 5054 { 5055 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5056 int vector = exit_qualification & 0xff; 5057 5058 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ 5059 kvm_apic_set_eoi_accelerated(vcpu, vector); 5060 return 1; 5061 } 5062 5063 static int handle_apic_write(struct kvm_vcpu *vcpu) 5064 { 5065 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5066 u32 offset = exit_qualification & 0xfff; 5067 5068 /* APIC-write VM exit is trap-like and thus no need to adjust IP */ 5069 kvm_apic_write_nodecode(vcpu, offset); 5070 return 1; 5071 } 5072 5073 static int handle_task_switch(struct kvm_vcpu *vcpu) 5074 { 5075 struct vcpu_vmx *vmx = to_vmx(vcpu); 5076 unsigned long exit_qualification; 5077 bool has_error_code = false; 5078 u32 error_code = 0; 5079 u16 tss_selector; 5080 int reason, type, idt_v, idt_index; 5081 5082 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); 5083 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); 5084 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); 5085 5086 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5087 5088 reason = (u32)exit_qualification >> 30; 5089 if (reason == TASK_SWITCH_GATE && idt_v) { 5090 switch (type) { 5091 case INTR_TYPE_NMI_INTR: 5092 vcpu->arch.nmi_injected = false; 5093 vmx_set_nmi_mask(vcpu, true); 5094 break; 5095 case INTR_TYPE_EXT_INTR: 5096 case INTR_TYPE_SOFT_INTR: 5097 kvm_clear_interrupt_queue(vcpu); 5098 break; 5099 case INTR_TYPE_HARD_EXCEPTION: 5100 if (vmx->idt_vectoring_info & 5101 VECTORING_INFO_DELIVER_CODE_MASK) { 5102 has_error_code = true; 5103 error_code = 5104 vmcs_read32(IDT_VECTORING_ERROR_CODE); 5105 } 5106 /* fall through */ 5107 case INTR_TYPE_SOFT_EXCEPTION: 5108 kvm_clear_exception_queue(vcpu); 5109 break; 5110 default: 5111 break; 5112 } 5113 } 5114 tss_selector = exit_qualification; 5115 5116 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && 5117 type != INTR_TYPE_EXT_INTR && 5118 type != INTR_TYPE_NMI_INTR)) 5119 WARN_ON(!skip_emulated_instruction(vcpu)); 5120 5121 /* 5122 * TODO: What about debug traps on tss switch? 5123 * Are we supposed to inject them and update dr6? 5124 */ 5125 return kvm_task_switch(vcpu, tss_selector, 5126 type == INTR_TYPE_SOFT_INTR ? idt_index : -1, 5127 reason, has_error_code, error_code); 5128 } 5129 5130 static int handle_ept_violation(struct kvm_vcpu *vcpu) 5131 { 5132 unsigned long exit_qualification; 5133 gpa_t gpa; 5134 u64 error_code; 5135 5136 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5137 5138 /* 5139 * EPT violation happened while executing iret from NMI, 5140 * "blocked by NMI" bit has to be set before next VM entry. 5141 * There are errata that may cause this bit to not be set: 5142 * AAK134, BY25. 5143 */ 5144 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 5145 enable_vnmi && 5146 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 5147 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); 5148 5149 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5150 trace_kvm_page_fault(gpa, exit_qualification); 5151 5152 /* Is it a read fault? */ 5153 error_code = (exit_qualification & EPT_VIOLATION_ACC_READ) 5154 ? 
PFERR_USER_MASK : 0; 5155 /* Is it a write fault? */ 5156 error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE) 5157 ? PFERR_WRITE_MASK : 0; 5158 /* Is it a fetch fault? */ 5159 error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR) 5160 ? PFERR_FETCH_MASK : 0; 5161 /* ept page table entry is present? */ 5162 error_code |= (exit_qualification & 5163 (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE | 5164 EPT_VIOLATION_EXECUTABLE)) 5165 ? PFERR_PRESENT_MASK : 0; 5166 5167 error_code |= (exit_qualification & 0x100) != 0 ? 5168 PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; 5169 5170 vcpu->arch.exit_qualification = exit_qualification; 5171 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); 5172 } 5173 5174 static int handle_ept_misconfig(struct kvm_vcpu *vcpu) 5175 { 5176 gpa_t gpa; 5177 5178 /* 5179 * A nested guest cannot optimize MMIO vmexits, because we have an 5180 * nGPA here instead of the required GPA. 5181 */ 5182 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5183 if (!is_guest_mode(vcpu) && 5184 !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { 5185 trace_kvm_fast_mmio(gpa); 5186 return kvm_skip_emulated_instruction(vcpu); 5187 } 5188 5189 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); 5190 } 5191 5192 static int handle_nmi_window(struct kvm_vcpu *vcpu) 5193 { 5194 WARN_ON_ONCE(!enable_vnmi); 5195 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING); 5196 ++vcpu->stat.nmi_window_exits; 5197 kvm_make_request(KVM_REQ_EVENT, vcpu); 5198 5199 return 1; 5200 } 5201 5202 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) 5203 { 5204 struct vcpu_vmx *vmx = to_vmx(vcpu); 5205 bool intr_window_requested; 5206 unsigned count = 130; 5207 5208 /* 5209 * We should never reach the point where we are emulating L2 5210 * due to invalid guest state as that means we incorrectly 5211 * allowed a nested VMEntry with an invalid vmcs12. 5212 */ 5213 WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending); 5214 5215 intr_window_requested = exec_controls_get(vmx) & 5216 CPU_BASED_VIRTUAL_INTR_PENDING; 5217 5218 while (vmx->emulation_required && count-- != 0) { 5219 if (intr_window_requested && vmx_interrupt_allowed(vcpu)) 5220 return handle_interrupt_window(&vmx->vcpu); 5221 5222 if (kvm_test_request(KVM_REQ_EVENT, vcpu)) 5223 return 1; 5224 5225 if (!kvm_emulate_instruction(vcpu, 0)) 5226 return 0; 5227 5228 if (vmx->emulation_required && !vmx->rmode.vm86_active && 5229 vcpu->arch.exception.pending) { 5230 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 5231 vcpu->run->internal.suberror = 5232 KVM_INTERNAL_ERROR_EMULATION; 5233 vcpu->run->internal.ndata = 0; 5234 return 0; 5235 } 5236 5237 if (vcpu->arch.halt_request) { 5238 vcpu->arch.halt_request = 0; 5239 return kvm_vcpu_halt(vcpu); 5240 } 5241 5242 /* 5243 * Note, return 1 and not 0, vcpu_run() is responsible for 5244 * morphing the pending signal into the proper return code. 
5245 */ 5246 if (signal_pending(current)) 5247 return 1; 5248 5249 if (need_resched()) 5250 schedule(); 5251 } 5252 5253 return 1; 5254 } 5255 5256 static void grow_ple_window(struct kvm_vcpu *vcpu) 5257 { 5258 struct vcpu_vmx *vmx = to_vmx(vcpu); 5259 unsigned int old = vmx->ple_window; 5260 5261 vmx->ple_window = __grow_ple_window(old, ple_window, 5262 ple_window_grow, 5263 ple_window_max); 5264 5265 if (vmx->ple_window != old) { 5266 vmx->ple_window_dirty = true; 5267 trace_kvm_ple_window_update(vcpu->vcpu_id, 5268 vmx->ple_window, old); 5269 } 5270 } 5271 5272 static void shrink_ple_window(struct kvm_vcpu *vcpu) 5273 { 5274 struct vcpu_vmx *vmx = to_vmx(vcpu); 5275 unsigned int old = vmx->ple_window; 5276 5277 vmx->ple_window = __shrink_ple_window(old, ple_window, 5278 ple_window_shrink, 5279 ple_window); 5280 5281 if (vmx->ple_window != old) { 5282 vmx->ple_window_dirty = true; 5283 trace_kvm_ple_window_update(vcpu->vcpu_id, 5284 vmx->ple_window, old); 5285 } 5286 } 5287 5288 /* 5289 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR. 5290 */ 5291 static void wakeup_handler(void) 5292 { 5293 struct kvm_vcpu *vcpu; 5294 int cpu = smp_processor_id(); 5295 5296 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); 5297 list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), 5298 blocked_vcpu_list) { 5299 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 5300 5301 if (pi_test_on(pi_desc) == 1) 5302 kvm_vcpu_kick(vcpu); 5303 } 5304 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); 5305 } 5306 5307 static void vmx_enable_tdp(void) 5308 { 5309 kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK, 5310 enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull, 5311 enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull, 5312 0ull, VMX_EPT_EXECUTABLE_MASK, 5313 cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK, 5314 VMX_EPT_RWX_MASK, 0ull); 5315 5316 ept_set_mmio_spte_mask(); 5317 kvm_enable_tdp(); 5318 } 5319 5320 /* 5321 * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE 5322 * exiting, so only get here on cpu with PAUSE-Loop-Exiting. 5323 */ 5324 static int handle_pause(struct kvm_vcpu *vcpu) 5325 { 5326 if (!kvm_pause_in_guest(vcpu->kvm)) 5327 grow_ple_window(vcpu); 5328 5329 /* 5330 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting" 5331 * VM-execution control is ignored if CPL > 0. OTOH, KVM 5332 * never set PAUSE_EXITING and just set PLE if supported, 5333 * so the vcpu must be CPL=0 if it gets a PAUSE exit. 
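	 * Note that grow_ple_window() above may already have widened this
	 * vCPU's PLE window (with the default grow factor of 2 it doubles,
	 * starting from ple_window and capped at ple_window_max), so a vCPU
	 * that keeps spinning exits progressively less often.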
5334 */ 5335 kvm_vcpu_on_spin(vcpu, true); 5336 return kvm_skip_emulated_instruction(vcpu); 5337 } 5338 5339 static int handle_nop(struct kvm_vcpu *vcpu) 5340 { 5341 return kvm_skip_emulated_instruction(vcpu); 5342 } 5343 5344 static int handle_mwait(struct kvm_vcpu *vcpu) 5345 { 5346 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); 5347 return handle_nop(vcpu); 5348 } 5349 5350 static int handle_invalid_op(struct kvm_vcpu *vcpu) 5351 { 5352 kvm_queue_exception(vcpu, UD_VECTOR); 5353 return 1; 5354 } 5355 5356 static int handle_monitor_trap(struct kvm_vcpu *vcpu) 5357 { 5358 return 1; 5359 } 5360 5361 static int handle_monitor(struct kvm_vcpu *vcpu) 5362 { 5363 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); 5364 return handle_nop(vcpu); 5365 } 5366 5367 static int handle_invpcid(struct kvm_vcpu *vcpu) 5368 { 5369 u32 vmx_instruction_info; 5370 unsigned long type; 5371 bool pcid_enabled; 5372 gva_t gva; 5373 struct x86_exception e; 5374 unsigned i; 5375 unsigned long roots_to_free = 0; 5376 struct { 5377 u64 pcid; 5378 u64 gla; 5379 } operand; 5380 5381 if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { 5382 kvm_queue_exception(vcpu, UD_VECTOR); 5383 return 1; 5384 } 5385 5386 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5387 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 5388 5389 if (type > 3) { 5390 kvm_inject_gp(vcpu, 0); 5391 return 1; 5392 } 5393 5394 /* According to the Intel instruction reference, the memory operand 5395 * is read even if it isn't needed (e.g., for type==all) 5396 */ 5397 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 5398 vmx_instruction_info, false, 5399 sizeof(operand), &gva)) 5400 return 1; 5401 5402 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { 5403 kvm_inject_page_fault(vcpu, &e); 5404 return 1; 5405 } 5406 5407 if (operand.pcid >> 12 != 0) { 5408 kvm_inject_gp(vcpu, 0); 5409 return 1; 5410 } 5411 5412 pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); 5413 5414 switch (type) { 5415 case INVPCID_TYPE_INDIV_ADDR: 5416 if ((!pcid_enabled && (operand.pcid != 0)) || 5417 is_noncanonical_address(operand.gla, vcpu)) { 5418 kvm_inject_gp(vcpu, 0); 5419 return 1; 5420 } 5421 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); 5422 return kvm_skip_emulated_instruction(vcpu); 5423 5424 case INVPCID_TYPE_SINGLE_CTXT: 5425 if (!pcid_enabled && (operand.pcid != 0)) { 5426 kvm_inject_gp(vcpu, 0); 5427 return 1; 5428 } 5429 5430 if (kvm_get_active_pcid(vcpu) == operand.pcid) { 5431 kvm_mmu_sync_roots(vcpu); 5432 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 5433 } 5434 5435 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) 5436 if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3) 5437 == operand.pcid) 5438 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 5439 5440 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); 5441 /* 5442 * If neither the current cr3 nor any of the prev_roots use the 5443 * given PCID, then nothing needs to be done here because a 5444 * resync will happen anyway before switching to any other CR3. 5445 */ 5446 5447 return kvm_skip_emulated_instruction(vcpu); 5448 5449 case INVPCID_TYPE_ALL_NON_GLOBAL: 5450 /* 5451 * Currently, KVM doesn't mark global entries in the shadow 5452 * page tables, so a non-global flush just degenerates to a 5453 * global flush. If needed, we could optimize this later by 5454 * keeping track of global entries in shadow page tables. 
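		 * Hence the fall-through: both the non-global and the
		 * include-globals variants end up in kvm_mmu_unload(), which
		 * drops all roots.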
5455 */ 5456 5457 /* fall-through */ 5458 case INVPCID_TYPE_ALL_INCL_GLOBAL: 5459 kvm_mmu_unload(vcpu); 5460 return kvm_skip_emulated_instruction(vcpu); 5461 5462 default: 5463 BUG(); /* We have already checked above that type <= 3 */ 5464 } 5465 } 5466 5467 static int handle_pml_full(struct kvm_vcpu *vcpu) 5468 { 5469 unsigned long exit_qualification; 5470 5471 trace_kvm_pml_full(vcpu->vcpu_id); 5472 5473 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5474 5475 /* 5476 * PML buffer FULL happened while executing iret from NMI, 5477 * "blocked by NMI" bit has to be set before next VM entry. 5478 */ 5479 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 5480 enable_vnmi && 5481 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 5482 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 5483 GUEST_INTR_STATE_NMI); 5484 5485 /* 5486 * PML buffer already flushed at beginning of VMEXIT. Nothing to do 5487 * here.., and there's no userspace involvement needed for PML. 5488 */ 5489 return 1; 5490 } 5491 5492 static int handle_preemption_timer(struct kvm_vcpu *vcpu) 5493 { 5494 struct vcpu_vmx *vmx = to_vmx(vcpu); 5495 5496 if (!vmx->req_immediate_exit && 5497 !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) 5498 kvm_lapic_expired_hv_timer(vcpu); 5499 5500 return 1; 5501 } 5502 5503 /* 5504 * When nested=0, all VMX instruction VM Exits filter here. The handlers 5505 * are overwritten by nested_vmx_setup() when nested=1. 5506 */ 5507 static int handle_vmx_instruction(struct kvm_vcpu *vcpu) 5508 { 5509 kvm_queue_exception(vcpu, UD_VECTOR); 5510 return 1; 5511 } 5512 5513 static int handle_encls(struct kvm_vcpu *vcpu) 5514 { 5515 /* 5516 * SGX virtualization is not yet supported. There is no software 5517 * enable bit for SGX, so we have to trap ENCLS and inject a #UD 5518 * to prevent the guest from executing ENCLS. 5519 */ 5520 kvm_queue_exception(vcpu, UD_VECTOR); 5521 return 1; 5522 } 5523 5524 /* 5525 * The exit handlers return 1 if the exit was handled fully and guest execution 5526 * may resume. Otherwise they set the kvm_run parameter to indicate what needs 5527 * to be done to userspace and return 0. 
5528 */ 5529 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { 5530 [EXIT_REASON_EXCEPTION_NMI] = handle_exception_nmi, 5531 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, 5532 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, 5533 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, 5534 [EXIT_REASON_IO_INSTRUCTION] = handle_io, 5535 [EXIT_REASON_CR_ACCESS] = handle_cr, 5536 [EXIT_REASON_DR_ACCESS] = handle_dr, 5537 [EXIT_REASON_CPUID] = kvm_emulate_cpuid, 5538 [EXIT_REASON_MSR_READ] = kvm_emulate_rdmsr, 5539 [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr, 5540 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, 5541 [EXIT_REASON_HLT] = kvm_emulate_halt, 5542 [EXIT_REASON_INVD] = handle_invd, 5543 [EXIT_REASON_INVLPG] = handle_invlpg, 5544 [EXIT_REASON_RDPMC] = handle_rdpmc, 5545 [EXIT_REASON_VMCALL] = handle_vmcall, 5546 [EXIT_REASON_VMCLEAR] = handle_vmx_instruction, 5547 [EXIT_REASON_VMLAUNCH] = handle_vmx_instruction, 5548 [EXIT_REASON_VMPTRLD] = handle_vmx_instruction, 5549 [EXIT_REASON_VMPTRST] = handle_vmx_instruction, 5550 [EXIT_REASON_VMREAD] = handle_vmx_instruction, 5551 [EXIT_REASON_VMRESUME] = handle_vmx_instruction, 5552 [EXIT_REASON_VMWRITE] = handle_vmx_instruction, 5553 [EXIT_REASON_VMOFF] = handle_vmx_instruction, 5554 [EXIT_REASON_VMON] = handle_vmx_instruction, 5555 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, 5556 [EXIT_REASON_APIC_ACCESS] = handle_apic_access, 5557 [EXIT_REASON_APIC_WRITE] = handle_apic_write, 5558 [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, 5559 [EXIT_REASON_WBINVD] = handle_wbinvd, 5560 [EXIT_REASON_XSETBV] = handle_xsetbv, 5561 [EXIT_REASON_TASK_SWITCH] = handle_task_switch, 5562 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, 5563 [EXIT_REASON_GDTR_IDTR] = handle_desc, 5564 [EXIT_REASON_LDTR_TR] = handle_desc, 5565 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, 5566 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, 5567 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, 5568 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, 5569 [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, 5570 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, 5571 [EXIT_REASON_INVEPT] = handle_vmx_instruction, 5572 [EXIT_REASON_INVVPID] = handle_vmx_instruction, 5573 [EXIT_REASON_RDRAND] = handle_invalid_op, 5574 [EXIT_REASON_RDSEED] = handle_invalid_op, 5575 [EXIT_REASON_PML_FULL] = handle_pml_full, 5576 [EXIT_REASON_INVPCID] = handle_invpcid, 5577 [EXIT_REASON_VMFUNC] = handle_vmx_instruction, 5578 [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, 5579 [EXIT_REASON_ENCLS] = handle_encls, 5580 }; 5581 5582 static const int kvm_vmx_max_exit_handlers = 5583 ARRAY_SIZE(kvm_vmx_exit_handlers); 5584 5585 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) 5586 { 5587 *info1 = vmcs_readl(EXIT_QUALIFICATION); 5588 *info2 = vmcs_read32(VM_EXIT_INTR_INFO); 5589 } 5590 5591 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) 5592 { 5593 if (vmx->pml_pg) { 5594 __free_page(vmx->pml_pg); 5595 vmx->pml_pg = NULL; 5596 } 5597 } 5598 5599 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) 5600 { 5601 struct vcpu_vmx *vmx = to_vmx(vcpu); 5602 u64 *pml_buf; 5603 u16 pml_idx; 5604 5605 pml_idx = vmcs_read16(GUEST_PML_INDEX); 5606 5607 /* Do nothing if PML buffer is empty */ 5608 if (pml_idx == (PML_ENTITY_NUM - 1)) 5609 return; 5610 5611 /* PML index always points to next available PML buffer entity */ 5612 if (pml_idx >= PML_ENTITY_NUM) 5613 pml_idx = 0; 5614 else 5615 
pml_idx++; 5616 5617 pml_buf = page_address(vmx->pml_pg); 5618 for (; pml_idx < PML_ENTITY_NUM; pml_idx++) { 5619 u64 gpa; 5620 5621 gpa = pml_buf[pml_idx]; 5622 WARN_ON(gpa & (PAGE_SIZE - 1)); 5623 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); 5624 } 5625 5626 /* reset PML index */ 5627 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 5628 } 5629 5630 /* 5631 * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap. 5632 * Called before reporting dirty_bitmap to userspace. 5633 */ 5634 static void kvm_flush_pml_buffers(struct kvm *kvm) 5635 { 5636 int i; 5637 struct kvm_vcpu *vcpu; 5638 /* 5639 * We only need to kick vcpu out of guest mode here, as PML buffer 5640 * is flushed at beginning of all VMEXITs, and it's obvious that only 5641 * vcpus running in guest are possible to have unflushed GPAs in PML 5642 * buffer. 5643 */ 5644 kvm_for_each_vcpu(i, vcpu, kvm) 5645 kvm_vcpu_kick(vcpu); 5646 } 5647 5648 static void vmx_dump_sel(char *name, uint32_t sel) 5649 { 5650 pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", 5651 name, vmcs_read16(sel), 5652 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), 5653 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), 5654 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); 5655 } 5656 5657 static void vmx_dump_dtsel(char *name, uint32_t limit) 5658 { 5659 pr_err("%s limit=0x%08x, base=0x%016lx\n", 5660 name, vmcs_read32(limit), 5661 vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); 5662 } 5663 5664 void dump_vmcs(void) 5665 { 5666 u32 vmentry_ctl, vmexit_ctl; 5667 u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control; 5668 unsigned long cr4; 5669 u64 efer; 5670 int i, n; 5671 5672 if (!dump_invalid_vmcs) { 5673 pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n"); 5674 return; 5675 } 5676 5677 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); 5678 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); 5679 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 5680 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); 5681 cr4 = vmcs_readl(GUEST_CR4); 5682 efer = vmcs_read64(GUEST_IA32_EFER); 5683 secondary_exec_control = 0; 5684 if (cpu_has_secondary_exec_ctrls()) 5685 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); 5686 5687 pr_err("*** Guest State ***\n"); 5688 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 5689 vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), 5690 vmcs_readl(CR0_GUEST_HOST_MASK)); 5691 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 5692 cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); 5693 pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); 5694 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) && 5695 (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA)) 5696 { 5697 pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", 5698 vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1)); 5699 pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", 5700 vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3)); 5701 } 5702 pr_err("RSP = 0x%016lx RIP = 0x%016lx\n", 5703 vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP)); 5704 pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", 5705 vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7)); 5706 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 5707 vmcs_readl(GUEST_SYSENTER_ESP), 5708 vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); 5709 vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); 5710 vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); 5711 
vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); 5712 vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); 5713 vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); 5714 vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); 5715 vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); 5716 vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); 5717 vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); 5718 vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); 5719 if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) || 5720 (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER))) 5721 pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", 5722 efer, vmcs_read64(GUEST_IA32_PAT)); 5723 pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", 5724 vmcs_read64(GUEST_IA32_DEBUGCTL), 5725 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); 5726 if (cpu_has_load_perf_global_ctrl() && 5727 vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) 5728 pr_err("PerfGlobCtl = 0x%016llx\n", 5729 vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); 5730 if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) 5731 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); 5732 pr_err("Interruptibility = %08x ActivityState = %08x\n", 5733 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), 5734 vmcs_read32(GUEST_ACTIVITY_STATE)); 5735 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 5736 pr_err("InterruptStatus = %04x\n", 5737 vmcs_read16(GUEST_INTR_STATUS)); 5738 5739 pr_err("*** Host State ***\n"); 5740 pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", 5741 vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); 5742 pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", 5743 vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), 5744 vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), 5745 vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), 5746 vmcs_read16(HOST_TR_SELECTOR)); 5747 pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", 5748 vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), 5749 vmcs_readl(HOST_TR_BASE)); 5750 pr_err("GDTBase=%016lx IDTBase=%016lx\n", 5751 vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); 5752 pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", 5753 vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), 5754 vmcs_readl(HOST_CR4)); 5755 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 5756 vmcs_readl(HOST_IA32_SYSENTER_ESP), 5757 vmcs_read32(HOST_IA32_SYSENTER_CS), 5758 vmcs_readl(HOST_IA32_SYSENTER_EIP)); 5759 if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER)) 5760 pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", 5761 vmcs_read64(HOST_IA32_EFER), 5762 vmcs_read64(HOST_IA32_PAT)); 5763 if (cpu_has_load_perf_global_ctrl() && 5764 vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 5765 pr_err("PerfGlobCtl = 0x%016llx\n", 5766 vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); 5767 5768 pr_err("*** Control State ***\n"); 5769 pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", 5770 pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); 5771 pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); 5772 pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", 5773 vmcs_read32(EXCEPTION_BITMAP), 5774 vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), 5775 vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); 5776 pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", 5777 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 5778 vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), 5779 vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); 5780 pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", 5781 vmcs_read32(VM_EXIT_INTR_INFO), 5782 
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
5783 vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
5784 pr_err(" reason=%08x qualification=%016lx\n",
5785 vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
5786 pr_err("IDTVectoring: info=%08x errcode=%08x\n",
5787 vmcs_read32(IDT_VECTORING_INFO_FIELD),
5788 vmcs_read32(IDT_VECTORING_ERROR_CODE));
5789 pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
5790 if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
5791 pr_err("TSC Multiplier = 0x%016llx\n",
5792 vmcs_read64(TSC_MULTIPLIER));
5793 if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
5794 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
5795 u16 status = vmcs_read16(GUEST_INTR_STATUS);
5796 pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
5797 }
5798 pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
5799 if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
5800 pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
5801 pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
5802 }
5803 if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
5804 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
5805 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
5806 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
5807 n = vmcs_read32(CR3_TARGET_COUNT);
5808 for (i = 0; i + 1 < n; i += 4)
5809 pr_err("CR3 target%u=%016lx target%u=%016lx\n",
5810 i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
5811 i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
5812 if (i < n)
5813 pr_err("CR3 target%u=%016lx\n",
5814 i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
5815 if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
5816 pr_err("PLE Gap=%08x Window=%08x\n",
5817 vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
5818 if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
5819 pr_err("Virtual processor ID = 0x%04x\n",
5820 vmcs_read16(VIRTUAL_PROCESSOR_ID));
5821 }
5822
5823 /*
5824 * The guest has exited. See if we can fix it or if we need userspace
5825 * assistance.
5826 */
5827 static int vmx_handle_exit(struct kvm_vcpu *vcpu)
5828 {
5829 struct vcpu_vmx *vmx = to_vmx(vcpu);
5830 u32 exit_reason = vmx->exit_reason;
5831 u32 vectoring_info = vmx->idt_vectoring_info;
5832
5833 trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
5834
5835 /*
5836 * Flush the PML buffer of logged GPAs so that dirty_bitmap is as up to
5837 * date as possible. A further benefit: kvm_vm_ioctl_get_dirty_log only
5838 * needs to kick every vCPU out of guest mode before querying
5839 * dirty_bitmap, because a vCPU that is already in root mode must have
5840 * had its PML buffer flushed here already.
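 *
 * (For reference: the PML buffer is a single page holding PML_ENTITY_NUM
 * = 512 u64 GPAs, and the CPU decrements GUEST_PML_INDEX for each write
 * it logs, which is why vmx_flush_pml_buffer() above treats an index of
 * 511 as "buffer empty" and an out-of-range index as "buffer full".)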
5841 */
5842 if (enable_pml)
5843 vmx_flush_pml_buffer(vcpu);
5844
5845 /* If guest state is invalid, start emulating */
5846 if (vmx->emulation_required)
5847 return handle_invalid_guest_state(vcpu);
5848
5849 if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
5850 return nested_vmx_reflect_vmexit(vcpu, exit_reason);
5851
5852 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
5853 dump_vmcs();
5854 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
5855 vcpu->run->fail_entry.hardware_entry_failure_reason
5856 = exit_reason;
5857 return 0;
5858 }
5859
5860 if (unlikely(vmx->fail)) {
5861 dump_vmcs();
5862 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
5863 vcpu->run->fail_entry.hardware_entry_failure_reason
5864 = vmcs_read32(VM_INSTRUCTION_ERROR);
5865 return 0;
5866 }
5867
5868 /*
5869 * Note:
5870 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by
5871 * event delivery, since that indicates the guest is accessing MMIO.
5872 * The VM exit would be triggered again after returning to the guest,
5873 * causing an infinite loop.
5874 */
5875 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
5876 (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
5877 exit_reason != EXIT_REASON_EPT_VIOLATION &&
5878 exit_reason != EXIT_REASON_PML_FULL &&
5879 exit_reason != EXIT_REASON_TASK_SWITCH)) {
5880 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5881 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
5882 vcpu->run->internal.ndata = 3;
5883 vcpu->run->internal.data[0] = vectoring_info;
5884 vcpu->run->internal.data[1] = exit_reason;
5885 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
5886 if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
5887 vcpu->run->internal.ndata++;
5888 vcpu->run->internal.data[3] =
5889 vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5890 }
5891 return 0;
5892 }
5893
5894 if (unlikely(!enable_vnmi &&
5895 vmx->loaded_vmcs->soft_vnmi_blocked)) {
5896 if (vmx_interrupt_allowed(vcpu)) {
5897 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
5898 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
5899 vcpu->arch.nmi_pending) {
5900 /*
5901 * This CPU doesn't give us a way to find the end of an
5902 * NMI-blocked window if the guest runs with IRQs
5903 * disabled. So pull the trigger after 1 s of
5904 * futile waiting, but inform the user about it.
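 * (vnmi_blocked_time is accumulated in vmx_recover_nmi_blocking(),
 * further down in this file, from the entry timestamp that
 * vmx_vcpu_run() records while soft NMI blocking is in effect.)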
5905 */ 5906 printk(KERN_WARNING "%s: Breaking out of NMI-blocked " 5907 "state on VCPU %d after 1 s timeout\n", 5908 __func__, vcpu->vcpu_id); 5909 vmx->loaded_vmcs->soft_vnmi_blocked = 0; 5910 } 5911 } 5912 5913 if (exit_reason < kvm_vmx_max_exit_handlers 5914 && kvm_vmx_exit_handlers[exit_reason]) { 5915 #ifdef CONFIG_RETPOLINE 5916 if (exit_reason == EXIT_REASON_MSR_WRITE) 5917 return kvm_emulate_wrmsr(vcpu); 5918 else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER) 5919 return handle_preemption_timer(vcpu); 5920 else if (exit_reason == EXIT_REASON_PENDING_INTERRUPT) 5921 return handle_interrupt_window(vcpu); 5922 else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) 5923 return handle_external_interrupt(vcpu); 5924 else if (exit_reason == EXIT_REASON_HLT) 5925 return kvm_emulate_halt(vcpu); 5926 else if (exit_reason == EXIT_REASON_EPT_MISCONFIG) 5927 return handle_ept_misconfig(vcpu); 5928 #endif 5929 return kvm_vmx_exit_handlers[exit_reason](vcpu); 5930 } else { 5931 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", 5932 exit_reason); 5933 dump_vmcs(); 5934 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 5935 vcpu->run->internal.suberror = 5936 KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; 5937 vcpu->run->internal.ndata = 1; 5938 vcpu->run->internal.data[0] = exit_reason; 5939 return 0; 5940 } 5941 } 5942 5943 /* 5944 * Software based L1D cache flush which is used when microcode providing 5945 * the cache control MSR is not loaded. 5946 * 5947 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to 5948 * flush it is required to read in 64 KiB because the replacement algorithm 5949 * is not exactly LRU. This could be sized at runtime via topology 5950 * information but as all relevant affected CPUs have 32KiB L1D cache size 5951 * there is no point in doing so. 5952 */ 5953 static void vmx_l1d_flush(struct kvm_vcpu *vcpu) 5954 { 5955 int size = PAGE_SIZE << L1D_CACHE_ORDER; 5956 5957 /* 5958 * This code is only executed when the the flush mode is 'cond' or 5959 * 'always' 5960 */ 5961 if (static_branch_likely(&vmx_l1d_flush_cond)) { 5962 bool flush_l1d; 5963 5964 /* 5965 * Clear the per-vcpu flush bit, it gets set again 5966 * either from vcpu_run() or from one of the unsafe 5967 * VMEXIT handlers. 5968 */ 5969 flush_l1d = vcpu->arch.l1tf_flush_l1d; 5970 vcpu->arch.l1tf_flush_l1d = false; 5971 5972 /* 5973 * Clear the per-cpu flush bit, it gets set again from 5974 * the interrupt handlers. 
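 *
 * Rough numbers for the software fill sequence further down, assuming
 * L1D_CACHE_ORDER is 4 as the 64 KiB figure above implies: size is
 * PAGE_SIZE << 4 = 64 KiB, so the first loop touches one byte every
 * 4096 bytes (16 iterations) to populate the TLB, and the second loop
 * reads one byte every 64 bytes (1024 iterations) to pull the whole
 * buffer through the L1D cache.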
5975 */ 5976 flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); 5977 kvm_clear_cpu_l1tf_flush_l1d(); 5978 5979 if (!flush_l1d) 5980 return; 5981 } 5982 5983 vcpu->stat.l1d_flush++; 5984 5985 if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { 5986 wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); 5987 return; 5988 } 5989 5990 asm volatile( 5991 /* First ensure the pages are in the TLB */ 5992 "xorl %%eax, %%eax\n" 5993 ".Lpopulate_tlb:\n\t" 5994 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" 5995 "addl $4096, %%eax\n\t" 5996 "cmpl %%eax, %[size]\n\t" 5997 "jne .Lpopulate_tlb\n\t" 5998 "xorl %%eax, %%eax\n\t" 5999 "cpuid\n\t" 6000 /* Now fill the cache */ 6001 "xorl %%eax, %%eax\n" 6002 ".Lfill_cache:\n" 6003 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" 6004 "addl $64, %%eax\n\t" 6005 "cmpl %%eax, %[size]\n\t" 6006 "jne .Lfill_cache\n\t" 6007 "lfence\n" 6008 :: [flush_pages] "r" (vmx_l1d_flush_pages), 6009 [size] "r" (size) 6010 : "eax", "ebx", "ecx", "edx"); 6011 } 6012 6013 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) 6014 { 6015 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6016 int tpr_threshold; 6017 6018 if (is_guest_mode(vcpu) && 6019 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) 6020 return; 6021 6022 tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr; 6023 if (is_guest_mode(vcpu)) 6024 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold; 6025 else 6026 vmcs_write32(TPR_THRESHOLD, tpr_threshold); 6027 } 6028 6029 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) 6030 { 6031 struct vcpu_vmx *vmx = to_vmx(vcpu); 6032 u32 sec_exec_control; 6033 6034 if (!lapic_in_kernel(vcpu)) 6035 return; 6036 6037 if (!flexpriority_enabled && 6038 !cpu_has_vmx_virtualize_x2apic_mode()) 6039 return; 6040 6041 /* Postpone execution until vmcs01 is the current VMCS. 
*/ 6042 if (is_guest_mode(vcpu)) { 6043 vmx->nested.change_vmcs01_virtual_apic_mode = true; 6044 return; 6045 } 6046 6047 sec_exec_control = secondary_exec_controls_get(vmx); 6048 sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 6049 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); 6050 6051 switch (kvm_get_apic_mode(vcpu)) { 6052 case LAPIC_MODE_INVALID: 6053 WARN_ONCE(true, "Invalid local APIC state"); 6054 case LAPIC_MODE_DISABLED: 6055 break; 6056 case LAPIC_MODE_XAPIC: 6057 if (flexpriority_enabled) { 6058 sec_exec_control |= 6059 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 6060 vmx_flush_tlb(vcpu, true); 6061 } 6062 break; 6063 case LAPIC_MODE_X2APIC: 6064 if (cpu_has_vmx_virtualize_x2apic_mode()) 6065 sec_exec_control |= 6066 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 6067 break; 6068 } 6069 secondary_exec_controls_set(vmx, sec_exec_control); 6070 6071 vmx_update_msr_bitmap(vcpu); 6072 } 6073 6074 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) 6075 { 6076 if (!is_guest_mode(vcpu)) { 6077 vmcs_write64(APIC_ACCESS_ADDR, hpa); 6078 vmx_flush_tlb(vcpu, true); 6079 } 6080 } 6081 6082 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) 6083 { 6084 u16 status; 6085 u8 old; 6086 6087 if (max_isr == -1) 6088 max_isr = 0; 6089 6090 status = vmcs_read16(GUEST_INTR_STATUS); 6091 old = status >> 8; 6092 if (max_isr != old) { 6093 status &= 0xff; 6094 status |= max_isr << 8; 6095 vmcs_write16(GUEST_INTR_STATUS, status); 6096 } 6097 } 6098 6099 static void vmx_set_rvi(int vector) 6100 { 6101 u16 status; 6102 u8 old; 6103 6104 if (vector == -1) 6105 vector = 0; 6106 6107 status = vmcs_read16(GUEST_INTR_STATUS); 6108 old = (u8)status & 0xff; 6109 if ((u8)vector != old) { 6110 status &= ~0xff; 6111 status |= (u8)vector; 6112 vmcs_write16(GUEST_INTR_STATUS, status); 6113 } 6114 } 6115 6116 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) 6117 { 6118 /* 6119 * When running L2, updating RVI is only relevant when 6120 * vmcs12 virtual-interrupt-delivery enabled. 6121 * However, it can be enabled only when L1 also 6122 * intercepts external-interrupts and in that case 6123 * we should not update vmcs02 RVI but instead intercept 6124 * interrupt. Therefore, do nothing when running L2. 6125 */ 6126 if (!is_guest_mode(vcpu)) 6127 vmx_set_rvi(max_irr); 6128 } 6129 6130 static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) 6131 { 6132 struct vcpu_vmx *vmx = to_vmx(vcpu); 6133 int max_irr; 6134 bool max_irr_updated; 6135 6136 WARN_ON(!vcpu->arch.apicv_active); 6137 if (pi_test_on(&vmx->pi_desc)) { 6138 pi_clear_on(&vmx->pi_desc); 6139 /* 6140 * IOMMU can write to PID.ON, so the barrier matters even on UP. 6141 * But on x86 this is just a compiler barrier anyway. 6142 */ 6143 smp_mb__after_atomic(); 6144 max_irr_updated = 6145 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); 6146 6147 /* 6148 * If we are running L2 and L1 has a new pending interrupt 6149 * which can be injected, we should re-evaluate 6150 * what should be done with this new L1 interrupt. 6151 * If L1 intercepts external-interrupts, we should 6152 * exit from L2 to L1. Otherwise, interrupt should be 6153 * delivered directly to L2. 
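 *
 * (Roughly: kvm_apic_update_irr() above ORs the 256-bit posted-interrupt
 * request bitmap (PIR) into the vAPIC IRR, returns the highest pending
 * vector in max_irr, and reports via its return value whether anything
 * new became pending.)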
6154 */ 6155 if (is_guest_mode(vcpu) && max_irr_updated) { 6156 if (nested_exit_on_intr(vcpu)) 6157 kvm_vcpu_exiting_guest_mode(vcpu); 6158 else 6159 kvm_make_request(KVM_REQ_EVENT, vcpu); 6160 } 6161 } else { 6162 max_irr = kvm_lapic_find_highest_irr(vcpu); 6163 } 6164 vmx_hwapic_irr_update(vcpu, max_irr); 6165 return max_irr; 6166 } 6167 6168 static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu) 6169 { 6170 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 6171 6172 return pi_test_on(pi_desc) || 6173 (pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc)); 6174 } 6175 6176 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) 6177 { 6178 if (!kvm_vcpu_apicv_active(vcpu)) 6179 return; 6180 6181 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); 6182 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); 6183 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); 6184 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); 6185 } 6186 6187 static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) 6188 { 6189 struct vcpu_vmx *vmx = to_vmx(vcpu); 6190 6191 pi_clear_on(&vmx->pi_desc); 6192 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); 6193 } 6194 6195 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx) 6196 { 6197 vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 6198 6199 /* if exit due to PF check for async PF */ 6200 if (is_page_fault(vmx->exit_intr_info)) 6201 vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason(); 6202 6203 /* Handle machine checks before interrupts are enabled */ 6204 if (is_machine_check(vmx->exit_intr_info)) 6205 kvm_machine_check(); 6206 6207 /* We need to handle NMIs before interrupts are enabled */ 6208 if (is_nmi(vmx->exit_intr_info)) { 6209 kvm_before_interrupt(&vmx->vcpu); 6210 asm("int $2"); 6211 kvm_after_interrupt(&vmx->vcpu); 6212 } 6213 } 6214 6215 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu) 6216 { 6217 unsigned int vector; 6218 unsigned long entry; 6219 #ifdef CONFIG_X86_64 6220 unsigned long tmp; 6221 #endif 6222 gate_desc *desc; 6223 u32 intr_info; 6224 6225 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 6226 if (WARN_ONCE(!is_external_intr(intr_info), 6227 "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info)) 6228 return; 6229 6230 vector = intr_info & INTR_INFO_VECTOR_MASK; 6231 desc = (gate_desc *)host_idt_base + vector; 6232 entry = gate_offset(desc); 6233 6234 kvm_before_interrupt(vcpu); 6235 6236 asm volatile( 6237 #ifdef CONFIG_X86_64 6238 "mov %%" _ASM_SP ", %[sp]\n\t" 6239 "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t" 6240 "push $%c[ss]\n\t" 6241 "push %[sp]\n\t" 6242 #endif 6243 "pushf\n\t" 6244 __ASM_SIZE(push) " $%c[cs]\n\t" 6245 CALL_NOSPEC 6246 : 6247 #ifdef CONFIG_X86_64 6248 [sp]"=&r"(tmp), 6249 #endif 6250 ASM_CALL_CONSTRAINT 6251 : 6252 THUNK_TARGET(entry), 6253 [ss]"i"(__KERNEL_DS), 6254 [cs]"i"(__KERNEL_CS) 6255 ); 6256 6257 kvm_after_interrupt(vcpu); 6258 } 6259 STACK_FRAME_NON_STANDARD(handle_external_interrupt_irqoff); 6260 6261 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu) 6262 { 6263 struct vcpu_vmx *vmx = to_vmx(vcpu); 6264 6265 if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT) 6266 handle_external_interrupt_irqoff(vcpu); 6267 else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI) 6268 handle_exception_nmi_irqoff(vmx); 6269 } 6270 6271 static bool vmx_has_emulated_msr(int index) 6272 { 6273 switch (index) { 6274 case MSR_IA32_SMBASE: 6275 /* 6276 * We cannot do SMM unless we can run the guest in big 6277 * real mode. 
6278 */ 6279 return enable_unrestricted_guest || emulate_invalid_guest_state; 6280 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 6281 return nested; 6282 case MSR_AMD64_VIRT_SPEC_CTRL: 6283 /* This is AMD only. */ 6284 return false; 6285 default: 6286 return true; 6287 } 6288 } 6289 6290 static bool vmx_pt_supported(void) 6291 { 6292 return pt_mode == PT_MODE_HOST_GUEST; 6293 } 6294 6295 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) 6296 { 6297 u32 exit_intr_info; 6298 bool unblock_nmi; 6299 u8 vector; 6300 bool idtv_info_valid; 6301 6302 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; 6303 6304 if (enable_vnmi) { 6305 if (vmx->loaded_vmcs->nmi_known_unmasked) 6306 return; 6307 /* 6308 * Can't use vmx->exit_intr_info since we're not sure what 6309 * the exit reason is. 6310 */ 6311 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 6312 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; 6313 vector = exit_intr_info & INTR_INFO_VECTOR_MASK; 6314 /* 6315 * SDM 3: 27.7.1.2 (September 2008) 6316 * Re-set bit "block by NMI" before VM entry if vmexit caused by 6317 * a guest IRET fault. 6318 * SDM 3: 23.2.2 (September 2008) 6319 * Bit 12 is undefined in any of the following cases: 6320 * If the VM exit sets the valid bit in the IDT-vectoring 6321 * information field. 6322 * If the VM exit is due to a double fault. 6323 */ 6324 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && 6325 vector != DF_VECTOR && !idtv_info_valid) 6326 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 6327 GUEST_INTR_STATE_NMI); 6328 else 6329 vmx->loaded_vmcs->nmi_known_unmasked = 6330 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) 6331 & GUEST_INTR_STATE_NMI); 6332 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) 6333 vmx->loaded_vmcs->vnmi_blocked_time += 6334 ktime_to_ns(ktime_sub(ktime_get(), 6335 vmx->loaded_vmcs->entry_time)); 6336 } 6337 6338 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, 6339 u32 idt_vectoring_info, 6340 int instr_len_field, 6341 int error_code_field) 6342 { 6343 u8 vector; 6344 int type; 6345 bool idtv_info_valid; 6346 6347 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; 6348 6349 vcpu->arch.nmi_injected = false; 6350 kvm_clear_exception_queue(vcpu); 6351 kvm_clear_interrupt_queue(vcpu); 6352 6353 if (!idtv_info_valid) 6354 return; 6355 6356 kvm_make_request(KVM_REQ_EVENT, vcpu); 6357 6358 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; 6359 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; 6360 6361 switch (type) { 6362 case INTR_TYPE_NMI_INTR: 6363 vcpu->arch.nmi_injected = true; 6364 /* 6365 * SDM 3: 27.7.1.2 (September 2008) 6366 * Clear bit "block by NMI" before VM entry if a NMI 6367 * delivery faulted. 
6368 */ 6369 vmx_set_nmi_mask(vcpu, false); 6370 break; 6371 case INTR_TYPE_SOFT_EXCEPTION: 6372 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 6373 /* fall through */ 6374 case INTR_TYPE_HARD_EXCEPTION: 6375 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { 6376 u32 err = vmcs_read32(error_code_field); 6377 kvm_requeue_exception_e(vcpu, vector, err); 6378 } else 6379 kvm_requeue_exception(vcpu, vector); 6380 break; 6381 case INTR_TYPE_SOFT_INTR: 6382 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 6383 /* fall through */ 6384 case INTR_TYPE_EXT_INTR: 6385 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); 6386 break; 6387 default: 6388 break; 6389 } 6390 } 6391 6392 static void vmx_complete_interrupts(struct vcpu_vmx *vmx) 6393 { 6394 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, 6395 VM_EXIT_INSTRUCTION_LEN, 6396 IDT_VECTORING_ERROR_CODE); 6397 } 6398 6399 static void vmx_cancel_injection(struct kvm_vcpu *vcpu) 6400 { 6401 __vmx_complete_interrupts(vcpu, 6402 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 6403 VM_ENTRY_INSTRUCTION_LEN, 6404 VM_ENTRY_EXCEPTION_ERROR_CODE); 6405 6406 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 6407 } 6408 6409 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) 6410 { 6411 int i, nr_msrs; 6412 struct perf_guest_switch_msr *msrs; 6413 6414 msrs = perf_guest_get_msrs(&nr_msrs); 6415 6416 if (!msrs) 6417 return; 6418 6419 for (i = 0; i < nr_msrs; i++) 6420 if (msrs[i].host == msrs[i].guest) 6421 clear_atomic_switch_msr(vmx, msrs[i].msr); 6422 else 6423 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, 6424 msrs[i].host, false); 6425 } 6426 6427 static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx) 6428 { 6429 u32 host_umwait_control; 6430 6431 if (!vmx_has_waitpkg(vmx)) 6432 return; 6433 6434 host_umwait_control = get_umwait_control_msr(); 6435 6436 if (vmx->msr_ia32_umwait_control != host_umwait_control) 6437 add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL, 6438 vmx->msr_ia32_umwait_control, 6439 host_umwait_control, false); 6440 else 6441 clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL); 6442 } 6443 6444 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) 6445 { 6446 struct vcpu_vmx *vmx = to_vmx(vcpu); 6447 u64 tscl; 6448 u32 delta_tsc; 6449 6450 if (vmx->req_immediate_exit) { 6451 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0); 6452 vmx->loaded_vmcs->hv_timer_soft_disabled = false; 6453 } else if (vmx->hv_deadline_tsc != -1) { 6454 tscl = rdtsc(); 6455 if (vmx->hv_deadline_tsc > tscl) 6456 /* set_hv_timer ensures the delta fits in 32-bits */ 6457 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> 6458 cpu_preemption_timer_multi); 6459 else 6460 delta_tsc = 0; 6461 6462 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); 6463 vmx->loaded_vmcs->hv_timer_soft_disabled = false; 6464 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) { 6465 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1); 6466 vmx->loaded_vmcs->hv_timer_soft_disabled = true; 6467 } 6468 } 6469 6470 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) 6471 { 6472 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) { 6473 vmx->loaded_vmcs->host_state.rsp = host_rsp; 6474 vmcs_writel(HOST_RSP, host_rsp); 6475 } 6476 } 6477 6478 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); 6479 6480 static void vmx_vcpu_run(struct kvm_vcpu *vcpu) 6481 { 6482 struct vcpu_vmx *vmx = to_vmx(vcpu); 6483 unsigned long cr3, cr4; 6484 6485 /* Record the guest's net 
vcpu time for enforced NMI injections. */ 6486 if (unlikely(!enable_vnmi && 6487 vmx->loaded_vmcs->soft_vnmi_blocked)) 6488 vmx->loaded_vmcs->entry_time = ktime_get(); 6489 6490 /* Don't enter VMX if guest state is invalid, let the exit handler 6491 start emulation until we arrive back to a valid state */ 6492 if (vmx->emulation_required) 6493 return; 6494 6495 if (vmx->ple_window_dirty) { 6496 vmx->ple_window_dirty = false; 6497 vmcs_write32(PLE_WINDOW, vmx->ple_window); 6498 } 6499 6500 if (vmx->nested.need_vmcs12_to_shadow_sync) 6501 nested_sync_vmcs12_to_shadow(vcpu); 6502 6503 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP)) 6504 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); 6505 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP)) 6506 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); 6507 6508 cr3 = __get_current_cr3_fast(); 6509 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 6510 vmcs_writel(HOST_CR3, cr3); 6511 vmx->loaded_vmcs->host_state.cr3 = cr3; 6512 } 6513 6514 cr4 = cr4_read_shadow(); 6515 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 6516 vmcs_writel(HOST_CR4, cr4); 6517 vmx->loaded_vmcs->host_state.cr4 = cr4; 6518 } 6519 6520 /* When single-stepping over STI and MOV SS, we must clear the 6521 * corresponding interruptibility bits in the guest state. Otherwise 6522 * vmentry fails as it then expects bit 14 (BS) in pending debug 6523 * exceptions being set, but that's not correct for the guest debugging 6524 * case. */ 6525 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 6526 vmx_set_interrupt_shadow(vcpu, 0); 6527 6528 kvm_load_guest_xsave_state(vcpu); 6529 6530 if (static_cpu_has(X86_FEATURE_PKU) && 6531 kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && 6532 vcpu->arch.pkru != vmx->host_pkru) 6533 __write_pkru(vcpu->arch.pkru); 6534 6535 pt_guest_enter(vmx); 6536 6537 atomic_switch_perf_msrs(vmx); 6538 atomic_switch_umwait_control_msr(vmx); 6539 6540 if (enable_preemption_timer) 6541 vmx_update_hv_timer(vcpu); 6542 6543 if (lapic_in_kernel(vcpu) && 6544 vcpu->arch.apic->lapic_timer.timer_advance_ns) 6545 kvm_wait_lapic_expire(vcpu); 6546 6547 /* 6548 * If this vCPU has touched SPEC_CTRL, restore the guest's value if 6549 * it's non-zero. Since vmentry is serialising on affected CPUs, there 6550 * is no need to worry about the conditional branch over the wrmsr 6551 * being speculatively taken. 6552 */ 6553 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); 6554 6555 /* L1D Flush includes CPU buffer clear to mitigate MDS */ 6556 if (static_branch_unlikely(&vmx_l1d_should_flush)) 6557 vmx_l1d_flush(vcpu); 6558 else if (static_branch_unlikely(&mds_user_clear)) 6559 mds_clear_cpu_buffers(); 6560 6561 if (vcpu->arch.cr2 != read_cr2()) 6562 write_cr2(vcpu->arch.cr2); 6563 6564 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 6565 vmx->loaded_vmcs->launched); 6566 6567 vcpu->arch.cr2 = read_cr2(); 6568 6569 /* 6570 * We do not use IBRS in the kernel. If this vCPU has used the 6571 * SPEC_CTRL MSR it may have left it on; save the value and 6572 * turn it off. This is much more efficient than blindly adding 6573 * it to the atomic save/restore list. Especially as the former 6574 * (Saving guest MSRs on vmexit) doesn't even exist in KVM. 6575 * 6576 * For non-nested case: 6577 * If the L01 MSR bitmap does not intercept the MSR, then we need to 6578 * save it. 6579 * 6580 * For nested case: 6581 * If the L02 MSR bitmap does not intercept the MSR, then we need to 6582 * save it. 
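 *
 * Put differently: the RDMSR below is needed only when the guest could
 * have written SPEC_CTRL without a VM exit (write intercept disabled);
 * if writes are intercepted, vmx->spec_ctrl was already captured by the
 * WRMSR handler and the extra RDMSR is skipped on the hot path.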
6583 */ 6584 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) 6585 vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); 6586 6587 x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); 6588 6589 /* All fields are clean at this point */ 6590 if (static_branch_unlikely(&enable_evmcs)) 6591 current_evmcs->hv_clean_fields |= 6592 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 6593 6594 if (static_branch_unlikely(&enable_evmcs)) 6595 current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index; 6596 6597 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ 6598 if (vmx->host_debugctlmsr) 6599 update_debugctlmsr(vmx->host_debugctlmsr); 6600 6601 #ifndef CONFIG_X86_64 6602 /* 6603 * The sysexit path does not restore ds/es, so we must set them to 6604 * a reasonable value ourselves. 6605 * 6606 * We can't defer this to vmx_prepare_switch_to_host() since that 6607 * function may be executed in interrupt context, which saves and 6608 * restore segments around it, nullifying its effect. 6609 */ 6610 loadsegment(ds, __USER_DS); 6611 loadsegment(es, __USER_DS); 6612 #endif 6613 6614 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) 6615 | (1 << VCPU_EXREG_RFLAGS) 6616 | (1 << VCPU_EXREG_PDPTR) 6617 | (1 << VCPU_EXREG_SEGMENTS) 6618 | (1 << VCPU_EXREG_CR3)); 6619 vcpu->arch.regs_dirty = 0; 6620 6621 pt_guest_exit(vmx); 6622 6623 /* 6624 * eager fpu is enabled if PKEY is supported and CR4 is switched 6625 * back on host, so it is safe to read guest PKRU from current 6626 * XSAVE. 6627 */ 6628 if (static_cpu_has(X86_FEATURE_PKU) && 6629 kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) { 6630 vcpu->arch.pkru = rdpkru(); 6631 if (vcpu->arch.pkru != vmx->host_pkru) 6632 __write_pkru(vmx->host_pkru); 6633 } 6634 6635 kvm_load_host_xsave_state(vcpu); 6636 6637 vmx->nested.nested_run_pending = 0; 6638 vmx->idt_vectoring_info = 0; 6639 6640 vmx->exit_reason = vmx->fail ? 
0xdead : vmcs_read32(VM_EXIT_REASON); 6641 if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY) 6642 kvm_machine_check(); 6643 6644 if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) 6645 return; 6646 6647 vmx->loaded_vmcs->launched = 1; 6648 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 6649 6650 vmx_recover_nmi_blocking(vmx); 6651 vmx_complete_interrupts(vmx); 6652 } 6653 6654 static struct kvm *vmx_vm_alloc(void) 6655 { 6656 struct kvm_vmx *kvm_vmx = __vmalloc(sizeof(struct kvm_vmx), 6657 GFP_KERNEL_ACCOUNT | __GFP_ZERO, 6658 PAGE_KERNEL); 6659 return &kvm_vmx->kvm; 6660 } 6661 6662 static void vmx_vm_free(struct kvm *kvm) 6663 { 6664 kfree(kvm->arch.hyperv.hv_pa_pg); 6665 vfree(to_kvm_vmx(kvm)); 6666 } 6667 6668 static void vmx_free_vcpu(struct kvm_vcpu *vcpu) 6669 { 6670 struct vcpu_vmx *vmx = to_vmx(vcpu); 6671 6672 if (enable_pml) 6673 vmx_destroy_pml_buffer(vmx); 6674 free_vpid(vmx->vpid); 6675 nested_vmx_free_vcpu(vcpu); 6676 free_loaded_vmcs(vmx->loaded_vmcs); 6677 kfree(vmx->guest_msrs); 6678 kvm_vcpu_uninit(vcpu); 6679 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu); 6680 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); 6681 kmem_cache_free(kvm_vcpu_cache, vmx); 6682 } 6683 6684 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) 6685 { 6686 int err; 6687 struct vcpu_vmx *vmx; 6688 unsigned long *msr_bitmap; 6689 int i, cpu; 6690 6691 BUILD_BUG_ON_MSG(offsetof(struct vcpu_vmx, vcpu) != 0, 6692 "struct kvm_vcpu must be at offset 0 for arch usercopy region"); 6693 6694 vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); 6695 if (!vmx) 6696 return ERR_PTR(-ENOMEM); 6697 6698 vmx->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, 6699 GFP_KERNEL_ACCOUNT); 6700 if (!vmx->vcpu.arch.user_fpu) { 6701 printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n"); 6702 err = -ENOMEM; 6703 goto free_partial_vcpu; 6704 } 6705 6706 vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, 6707 GFP_KERNEL_ACCOUNT); 6708 if (!vmx->vcpu.arch.guest_fpu) { 6709 printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n"); 6710 err = -ENOMEM; 6711 goto free_user_fpu; 6712 } 6713 6714 vmx->vpid = allocate_vpid(); 6715 6716 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); 6717 if (err) 6718 goto free_vcpu; 6719 6720 err = -ENOMEM; 6721 6722 /* 6723 * If PML is turned on, failure on enabling PML just results in failure 6724 * of creating the vcpu, therefore we can simplify PML logic (by 6725 * avoiding dealing with cases, such as enabling PML partially on vcpus 6726 * for the guest, etc. 6727 */ 6728 if (enable_pml) { 6729 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 6730 if (!vmx->pml_pg) 6731 goto uninit_vcpu; 6732 } 6733 6734 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); 6735 BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) 6736 > PAGE_SIZE); 6737 6738 if (!vmx->guest_msrs) 6739 goto free_pml; 6740 6741 for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { 6742 u32 index = vmx_msr_index[i]; 6743 u32 data_low, data_high; 6744 int j = vmx->nmsrs; 6745 6746 if (rdmsr_safe(index, &data_low, &data_high) < 0) 6747 continue; 6748 if (wrmsr_safe(index, data_low, data_high) < 0) 6749 continue; 6750 6751 vmx->guest_msrs[j].index = i; 6752 vmx->guest_msrs[j].data = 0; 6753 switch (index) { 6754 case MSR_IA32_TSX_CTRL: 6755 /* 6756 * No need to pass TSX_CTRL_CPUID_CLEAR through, so 6757 * let's avoid changing CPUID bits under the host 6758 * kernel's feet. 
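 *
 * (Roughly: the mask below is applied when the guest's value is loaded
 * into the real MSR, so e.g. a guest write of TSX_CTRL_RTM_DISABLE |
 * TSX_CTRL_CPUID_CLEAR only propagates RTM_DISABLE; the host's own
 * CPUID_CLEAR choice is left untouched.)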
6759 */ 6760 vmx->guest_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR; 6761 break; 6762 default: 6763 vmx->guest_msrs[j].mask = -1ull; 6764 break; 6765 } 6766 ++vmx->nmsrs; 6767 } 6768 6769 err = alloc_loaded_vmcs(&vmx->vmcs01); 6770 if (err < 0) 6771 goto free_msrs; 6772 6773 msr_bitmap = vmx->vmcs01.msr_bitmap; 6774 vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R); 6775 vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW); 6776 vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW); 6777 vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); 6778 vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); 6779 vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); 6780 vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); 6781 if (kvm_cstate_in_guest(kvm)) { 6782 vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C1_RES, MSR_TYPE_R); 6783 vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R); 6784 vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R); 6785 vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R); 6786 } 6787 vmx->msr_bitmap_mode = 0; 6788 6789 vmx->loaded_vmcs = &vmx->vmcs01; 6790 cpu = get_cpu(); 6791 vmx_vcpu_load(&vmx->vcpu, cpu); 6792 vmx->vcpu.cpu = cpu; 6793 init_vmcs(vmx); 6794 vmx_vcpu_put(&vmx->vcpu); 6795 put_cpu(); 6796 if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { 6797 err = alloc_apic_access_page(kvm); 6798 if (err) 6799 goto free_vmcs; 6800 } 6801 6802 if (enable_ept && !enable_unrestricted_guest) { 6803 err = init_rmode_identity_map(kvm); 6804 if (err) 6805 goto free_vmcs; 6806 } 6807 6808 if (nested) 6809 nested_vmx_setup_ctls_msrs(&vmx->nested.msrs, 6810 vmx_capability.ept, 6811 kvm_vcpu_apicv_active(&vmx->vcpu)); 6812 else 6813 memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs)); 6814 6815 vmx->nested.posted_intr_nv = -1; 6816 vmx->nested.current_vmptr = -1ull; 6817 6818 vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; 6819 6820 /* 6821 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR 6822 * or POSTED_INTR_WAKEUP_VECTOR. 6823 */ 6824 vmx->pi_desc.nv = POSTED_INTR_VECTOR; 6825 vmx->pi_desc.sn = 1; 6826 6827 vmx->ept_pointer = INVALID_PAGE; 6828 6829 return &vmx->vcpu; 6830 6831 free_vmcs: 6832 free_loaded_vmcs(vmx->loaded_vmcs); 6833 free_msrs: 6834 kfree(vmx->guest_msrs); 6835 free_pml: 6836 vmx_destroy_pml_buffer(vmx); 6837 uninit_vcpu: 6838 kvm_vcpu_uninit(&vmx->vcpu); 6839 free_vcpu: 6840 free_vpid(vmx->vpid); 6841 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); 6842 free_user_fpu: 6843 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu); 6844 free_partial_vcpu: 6845 kmem_cache_free(kvm_vcpu_cache, vmx); 6846 return ERR_PTR(err); 6847 } 6848 6849 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" 6850 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. 
See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" 6851 6852 static int vmx_vm_init(struct kvm *kvm) 6853 { 6854 spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock); 6855 6856 if (!ple_gap) 6857 kvm->arch.pause_in_guest = true; 6858 6859 if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { 6860 switch (l1tf_mitigation) { 6861 case L1TF_MITIGATION_OFF: 6862 case L1TF_MITIGATION_FLUSH_NOWARN: 6863 /* 'I explicitly don't care' is set */ 6864 break; 6865 case L1TF_MITIGATION_FLUSH: 6866 case L1TF_MITIGATION_FLUSH_NOSMT: 6867 case L1TF_MITIGATION_FULL: 6868 /* 6869 * Warn upon starting the first VM in a potentially 6870 * insecure environment. 6871 */ 6872 if (sched_smt_active()) 6873 pr_warn_once(L1TF_MSG_SMT); 6874 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) 6875 pr_warn_once(L1TF_MSG_L1D); 6876 break; 6877 case L1TF_MITIGATION_FULL_FORCE: 6878 /* Flush is enforced */ 6879 break; 6880 } 6881 } 6882 return 0; 6883 } 6884 6885 static int __init vmx_check_processor_compat(void) 6886 { 6887 struct vmcs_config vmcs_conf; 6888 struct vmx_capability vmx_cap; 6889 6890 if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) 6891 return -EIO; 6892 if (nested) 6893 nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept, 6894 enable_apicv); 6895 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { 6896 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", 6897 smp_processor_id()); 6898 return -EIO; 6899 } 6900 return 0; 6901 } 6902 6903 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) 6904 { 6905 u8 cache; 6906 u64 ipat = 0; 6907 6908 /* For VT-d and EPT combination 6909 * 1. MMIO: always map as UC 6910 * 2. EPT with VT-d: 6911 * a. VT-d without snooping control feature: can't guarantee the 6912 * result, try to trust guest. 6913 * b. VT-d with snooping control feature: snooping control feature of 6914 * VT-d engine can guarantee the cache correctness. Just set it 6915 * to WB to keep consistent with host. So the same as item 3. 6916 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep 6917 * consistent with host MTRR 6918 */ 6919 if (is_mmio) { 6920 cache = MTRR_TYPE_UNCACHABLE; 6921 goto exit; 6922 } 6923 6924 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { 6925 ipat = VMX_EPT_IPAT_BIT; 6926 cache = MTRR_TYPE_WRBACK; 6927 goto exit; 6928 } 6929 6930 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { 6931 ipat = VMX_EPT_IPAT_BIT; 6932 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 6933 cache = MTRR_TYPE_WRBACK; 6934 else 6935 cache = MTRR_TYPE_UNCACHABLE; 6936 goto exit; 6937 } 6938 6939 cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); 6940 6941 exit: 6942 return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat; 6943 } 6944 6945 static int vmx_get_lpage_level(void) 6946 { 6947 if (enable_ept && !cpu_has_vmx_ept_1g_page()) 6948 return PT_DIRECTORY_LEVEL; 6949 else 6950 /* For shadow and EPT supported 1GB page */ 6951 return PT_PDPE_LEVEL; 6952 } 6953 6954 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx) 6955 { 6956 /* 6957 * These bits in the secondary execution controls field 6958 * are dynamic, the others are mostly based on the hypervisor 6959 * architecture and the guest's CPUID. Do not touch the 6960 * dynamic bits. 
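 *
 * Illustrative example: if SECONDARY_EXEC_SHADOW_VMCS is currently set
 * in the VMCS but clear in the freshly computed
 * vmx->secondary_exec_control, the merge below,
 *	(new_ctl & ~mask) | (cur_ctl & mask),
 * keeps it set, because bits in 'mask' are always taken from the
 * current value.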
6961 */ 6962 u32 mask = 6963 SECONDARY_EXEC_SHADOW_VMCS | 6964 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 6965 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 6966 SECONDARY_EXEC_DESC; 6967 6968 u32 new_ctl = vmx->secondary_exec_control; 6969 u32 cur_ctl = secondary_exec_controls_get(vmx); 6970 6971 secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask)); 6972 } 6973 6974 /* 6975 * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits 6976 * (indicating "allowed-1") if they are supported in the guest's CPUID. 6977 */ 6978 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) 6979 { 6980 struct vcpu_vmx *vmx = to_vmx(vcpu); 6981 struct kvm_cpuid_entry2 *entry; 6982 6983 vmx->nested.msrs.cr0_fixed1 = 0xffffffff; 6984 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; 6985 6986 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \ 6987 if (entry && (entry->_reg & (_cpuid_mask))) \ 6988 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ 6989 } while (0) 6990 6991 entry = kvm_find_cpuid_entry(vcpu, 0x1, 0); 6992 cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME)); 6993 cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME)); 6994 cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC)); 6995 cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE)); 6996 cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE)); 6997 cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE)); 6998 cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE)); 6999 cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE)); 7000 cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR)); 7001 cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM)); 7002 cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX)); 7003 cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX)); 7004 cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID)); 7005 cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE)); 7006 7007 entry = kvm_find_cpuid_entry(vcpu, 0x7, 0); 7008 cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE)); 7009 cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP)); 7010 cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP)); 7011 cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU)); 7012 cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP)); 7013 cr4_fixed1_update(X86_CR4_LA57, ecx, bit(X86_FEATURE_LA57)); 7014 7015 #undef cr4_fixed1_update 7016 } 7017 7018 static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu) 7019 { 7020 struct vcpu_vmx *vmx = to_vmx(vcpu); 7021 7022 if (kvm_mpx_supported()) { 7023 bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX); 7024 7025 if (mpx_enabled) { 7026 vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; 7027 vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; 7028 } else { 7029 vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS; 7030 vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS; 7031 } 7032 } 7033 } 7034 7035 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) 7036 { 7037 struct vcpu_vmx *vmx = to_vmx(vcpu); 7038 struct kvm_cpuid_entry2 *best = NULL; 7039 int i; 7040 7041 for (i = 0; i < PT_CPUID_LEAVES; i++) { 7042 best = kvm_find_cpuid_entry(vcpu, 0x14, i); 7043 if (!best) 7044 return; 7045 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax; 7046 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx; 7047 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx; 7048 vmx->pt_desc.caps[CPUID_EDX + 
i*PT_CPUID_REGS_NUM] = best->edx; 7049 } 7050 7051 /* Get the number of configurable Address Ranges for filtering */ 7052 vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps, 7053 PT_CAP_num_address_ranges); 7054 7055 /* Initialize and clear the no dependency bits */ 7056 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS | 7057 RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC); 7058 7059 /* 7060 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise 7061 * will inject an #GP 7062 */ 7063 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering)) 7064 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN; 7065 7066 /* 7067 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and 7068 * PSBFreq can be set 7069 */ 7070 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc)) 7071 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC | 7072 RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ); 7073 7074 /* 7075 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn BranchEn and 7076 * MTCFreq can be set 7077 */ 7078 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc)) 7079 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN | 7080 RTIT_CTL_BRANCH_EN | RTIT_CTL_MTC_RANGE); 7081 7082 /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */ 7083 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite)) 7084 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW | 7085 RTIT_CTL_PTW_EN); 7086 7087 /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */ 7088 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace)) 7089 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN; 7090 7091 /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */ 7092 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output)) 7093 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA; 7094 7095 /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabircEn can be set */ 7096 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys)) 7097 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; 7098 7099 /* unmask address range configure area */ 7100 for (i = 0; i < vmx->pt_desc.addr_range; i++) 7101 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); 7102 } 7103 7104 static void vmx_cpuid_update(struct kvm_vcpu *vcpu) 7105 { 7106 struct vcpu_vmx *vmx = to_vmx(vcpu); 7107 7108 /* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */ 7109 vcpu->arch.xsaves_enabled = false; 7110 7111 if (cpu_has_secondary_exec_ctrls()) { 7112 vmx_compute_secondary_exec_control(vmx); 7113 vmcs_set_secondary_exec_control(vmx); 7114 } 7115 7116 if (nested_vmx_allowed(vcpu)) 7117 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= 7118 FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX | 7119 FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; 7120 else 7121 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= 7122 ~(FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX | 7123 FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX); 7124 7125 if (nested_vmx_allowed(vcpu)) { 7126 nested_vmx_cr_fixed1_bits_update(vcpu); 7127 nested_vmx_entry_exit_ctls_update(vcpu); 7128 } 7129 7130 if (boot_cpu_has(X86_FEATURE_INTEL_PT) && 7131 guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT)) 7132 update_intel_pt_cfg(vcpu); 7133 7134 if (boot_cpu_has(X86_FEATURE_RTM)) { 7135 struct shared_msr_entry *msr; 7136 msr = find_msr_entry(vmx, MSR_IA32_TSX_CTRL); 7137 if (msr) { 7138 bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM); 7139 vmx_set_guest_msr(vmx, msr, enabled ? 
0 : TSX_CTRL_RTM_DISABLE); 7140 } 7141 } 7142 } 7143 7144 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) 7145 { 7146 if (func == 1 && nested) 7147 entry->ecx |= bit(X86_FEATURE_VMX); 7148 } 7149 7150 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) 7151 { 7152 to_vmx(vcpu)->req_immediate_exit = true; 7153 } 7154 7155 static int vmx_check_intercept(struct kvm_vcpu *vcpu, 7156 struct x86_instruction_info *info, 7157 enum x86_intercept_stage stage) 7158 { 7159 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 7160 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 7161 7162 /* 7163 * RDPID causes #UD if disabled through secondary execution controls. 7164 * Because it is marked as EmulateOnUD, we need to intercept it here. 7165 */ 7166 if (info->intercept == x86_intercept_rdtscp && 7167 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) { 7168 ctxt->exception.vector = UD_VECTOR; 7169 ctxt->exception.error_code_valid = false; 7170 return X86EMUL_PROPAGATE_FAULT; 7171 } 7172 7173 /* TODO: check more intercepts... */ 7174 return X86EMUL_CONTINUE; 7175 } 7176 7177 #ifdef CONFIG_X86_64 7178 /* (a << shift) / divisor, return 1 if overflow otherwise 0 */ 7179 static inline int u64_shl_div_u64(u64 a, unsigned int shift, 7180 u64 divisor, u64 *result) 7181 { 7182 u64 low = a << shift, high = a >> (64 - shift); 7183 7184 /* To avoid the overflow on divq */ 7185 if (high >= divisor) 7186 return 1; 7187 7188 /* Low hold the result, high hold rem which is discarded */ 7189 asm("divq %2\n\t" : "=a" (low), "=d" (high) : 7190 "rm" (divisor), "0" (low), "1" (high)); 7191 *result = low; 7192 7193 return 0; 7194 } 7195 7196 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc, 7197 bool *expired) 7198 { 7199 struct vcpu_vmx *vmx; 7200 u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles; 7201 struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer; 7202 7203 if (kvm_mwait_in_guest(vcpu->kvm) || 7204 kvm_can_post_timer_interrupt(vcpu)) 7205 return -EOPNOTSUPP; 7206 7207 vmx = to_vmx(vcpu); 7208 tscl = rdtsc(); 7209 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); 7210 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; 7211 lapic_timer_advance_cycles = nsec_to_cycles(vcpu, 7212 ktimer->timer_advance_ns); 7213 7214 if (delta_tsc > lapic_timer_advance_cycles) 7215 delta_tsc -= lapic_timer_advance_cycles; 7216 else 7217 delta_tsc = 0; 7218 7219 /* Convert to host delta tsc if tsc scaling is enabled */ 7220 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && 7221 delta_tsc && u64_shl_div_u64(delta_tsc, 7222 kvm_tsc_scaling_ratio_frac_bits, 7223 vcpu->arch.tsc_scaling_ratio, &delta_tsc)) 7224 return -ERANGE; 7225 7226 /* 7227 * If the delta tsc can't fit in the 32 bit after the multi shift, 7228 * we can't use the preemption timer. 7229 * It's possible that it fits on later vmentries, but checking 7230 * on every vmentry is costly so we just use an hrtimer. 
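 *
 * Rough numbers, assuming (purely for illustration) a 2.5 GHz TSC and a
 * preemption timer rate shift of 5: the timer field is programmed with
 * delta_tsc >> 5, so the largest representable deadline is about
 * (2^32 << 5) / 2.5e9 ~= 55 seconds of guest TSC time; anything further
 * out fails the check below and falls back to the hrtimer.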
7231 */ 7232 if (delta_tsc >> (cpu_preemption_timer_multi + 32)) 7233 return -ERANGE; 7234 7235 vmx->hv_deadline_tsc = tscl + delta_tsc; 7236 *expired = !delta_tsc; 7237 return 0; 7238 } 7239 7240 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) 7241 { 7242 to_vmx(vcpu)->hv_deadline_tsc = -1; 7243 } 7244 #endif 7245 7246 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) 7247 { 7248 if (!kvm_pause_in_guest(vcpu->kvm)) 7249 shrink_ple_window(vcpu); 7250 } 7251 7252 static void vmx_slot_enable_log_dirty(struct kvm *kvm, 7253 struct kvm_memory_slot *slot) 7254 { 7255 kvm_mmu_slot_leaf_clear_dirty(kvm, slot); 7256 kvm_mmu_slot_largepage_remove_write_access(kvm, slot); 7257 } 7258 7259 static void vmx_slot_disable_log_dirty(struct kvm *kvm, 7260 struct kvm_memory_slot *slot) 7261 { 7262 kvm_mmu_slot_set_dirty(kvm, slot); 7263 } 7264 7265 static void vmx_flush_log_dirty(struct kvm *kvm) 7266 { 7267 kvm_flush_pml_buffers(kvm); 7268 } 7269 7270 static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) 7271 { 7272 struct vmcs12 *vmcs12; 7273 struct vcpu_vmx *vmx = to_vmx(vcpu); 7274 gpa_t gpa, dst; 7275 7276 if (is_guest_mode(vcpu)) { 7277 WARN_ON_ONCE(vmx->nested.pml_full); 7278 7279 /* 7280 * Check if PML is enabled for the nested guest. 7281 * Whether eptp bit 6 is set is already checked 7282 * as part of A/D emulation. 7283 */ 7284 vmcs12 = get_vmcs12(vcpu); 7285 if (!nested_cpu_has_pml(vmcs12)) 7286 return 0; 7287 7288 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { 7289 vmx->nested.pml_full = true; 7290 return 1; 7291 } 7292 7293 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; 7294 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; 7295 7296 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, 7297 offset_in_page(dst), sizeof(gpa))) 7298 return 0; 7299 7300 vmcs12->guest_pml_index--; 7301 } 7302 7303 return 0; 7304 } 7305 7306 static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, 7307 struct kvm_memory_slot *memslot, 7308 gfn_t offset, unsigned long mask) 7309 { 7310 kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); 7311 } 7312 7313 static void __pi_post_block(struct kvm_vcpu *vcpu) 7314 { 7315 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 7316 struct pi_desc old, new; 7317 unsigned int dest; 7318 7319 do { 7320 old.control = new.control = pi_desc->control; 7321 WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR, 7322 "Wakeup handler not enabled while the VCPU is blocked\n"); 7323 7324 dest = cpu_physical_id(vcpu->cpu); 7325 7326 if (x2apic_enabled()) 7327 new.ndst = dest; 7328 else 7329 new.ndst = (dest << 8) & 0xFF00; 7330 7331 /* set 'NV' to 'notification vector' */ 7332 new.nv = POSTED_INTR_VECTOR; 7333 } while (cmpxchg64(&pi_desc->control, old.control, 7334 new.control) != old.control); 7335 7336 if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) { 7337 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); 7338 list_del(&vcpu->blocked_vcpu_list); 7339 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); 7340 vcpu->pre_pcpu = -1; 7341 } 7342 } 7343 7344 /* 7345 * This routine does the following things for vCPU which is going 7346 * to be blocked if VT-d PI is enabled. 7347 * - Store the vCPU to the wakeup list, so when interrupts happen 7348 * we can find the right vCPU to wake up. 
7349 * - Change the Posted-interrupt descriptor as below:
7350 *    'NDST' <-- vcpu->pre_pcpu
7351 *    'NV' <-- POSTED_INTR_WAKEUP_VECTOR
7352 * - If 'ON' is set during this process, at least one interrupt was
7353 *   posted for this vCPU while it was being set up to block; in that
7354 *   case it must not block, so return 1. Otherwise return 0.
7355 *
7356 */
7357 static int pi_pre_block(struct kvm_vcpu *vcpu)
7358 {
7359 unsigned int dest;
7360 struct pi_desc old, new;
7361 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
7362
7363 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
7364 !irq_remapping_cap(IRQ_POSTING_CAP) ||
7365 !kvm_vcpu_apicv_active(vcpu))
7366 return 0;
7367
7368 WARN_ON(irqs_disabled());
7369 local_irq_disable();
7370 if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
7371 vcpu->pre_pcpu = vcpu->cpu;
7372 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
7373 list_add_tail(&vcpu->blocked_vcpu_list,
7374 &per_cpu(blocked_vcpu_on_cpu,
7375 vcpu->pre_pcpu));
7376 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
7377 }
7378
7379 do {
7380 old.control = new.control = pi_desc->control;
7381
7382 WARN((pi_desc->sn == 1),
7383 "Warning: SN field of posted-interrupts "
7384 "is set before blocking\n");
7385
7386 /*
7387 * Since the vCPU can be preempted during this process,
7388 * vcpu->cpu may differ from pre_pcpu. Use pre_pcpu as the
7389 * destination of the wakeup notification event so that,
7390 * if an interrupt arrives while the vCPU is blocked,
7391 * the wakeup handler can find the right vCPU
7392 * to wake up.
7393 */
7394 dest = cpu_physical_id(vcpu->pre_pcpu);
7395
7396 if (x2apic_enabled())
7397 new.ndst = dest;
7398 else
7399 new.ndst = (dest << 8) & 0xFF00;
7400
7401 /* set 'NV' to 'wakeup vector' */
7402 new.nv = POSTED_INTR_WAKEUP_VECTOR;
7403 } while (cmpxchg64(&pi_desc->control, old.control,
7404 new.control) != old.control);
7405
7406 /* We should not block the vCPU if an interrupt is posted for it.
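 * (pi_test_on() checks the descriptor's ON bit; if an interrupt was
 * posted while the wakeup vector was being installed, __pi_post_block()
 * undoes the setup above, pre_pcpu returns to -1, and the caller is
 * told not to block.)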
*/ 7407 if (pi_test_on(pi_desc) == 1) 7408 __pi_post_block(vcpu); 7409 7410 local_irq_enable(); 7411 return (vcpu->pre_pcpu == -1); 7412 } 7413 7414 static int vmx_pre_block(struct kvm_vcpu *vcpu) 7415 { 7416 if (pi_pre_block(vcpu)) 7417 return 1; 7418 7419 if (kvm_lapic_hv_timer_in_use(vcpu)) 7420 kvm_lapic_switch_to_sw_timer(vcpu); 7421 7422 return 0; 7423 } 7424 7425 static void pi_post_block(struct kvm_vcpu *vcpu) 7426 { 7427 if (vcpu->pre_pcpu == -1) 7428 return; 7429 7430 WARN_ON(irqs_disabled()); 7431 local_irq_disable(); 7432 __pi_post_block(vcpu); 7433 local_irq_enable(); 7434 } 7435 7436 static void vmx_post_block(struct kvm_vcpu *vcpu) 7437 { 7438 if (kvm_x86_ops->set_hv_timer) 7439 kvm_lapic_switch_to_hv_timer(vcpu); 7440 7441 pi_post_block(vcpu); 7442 } 7443 7444 /* 7445 * vmx_update_pi_irte - set IRTE for Posted-Interrupts 7446 * 7447 * @kvm: kvm 7448 * @host_irq: host irq of the interrupt 7449 * @guest_irq: gsi of the interrupt 7450 * @set: set or unset PI 7451 * returns 0 on success, < 0 on failure 7452 */ 7453 static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, 7454 uint32_t guest_irq, bool set) 7455 { 7456 struct kvm_kernel_irq_routing_entry *e; 7457 struct kvm_irq_routing_table *irq_rt; 7458 struct kvm_lapic_irq irq; 7459 struct kvm_vcpu *vcpu; 7460 struct vcpu_data vcpu_info; 7461 int idx, ret = 0; 7462 7463 if (!kvm_arch_has_assigned_device(kvm) || 7464 !irq_remapping_cap(IRQ_POSTING_CAP) || 7465 !kvm_vcpu_apicv_active(kvm->vcpus[0])) 7466 return 0; 7467 7468 idx = srcu_read_lock(&kvm->irq_srcu); 7469 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); 7470 if (guest_irq >= irq_rt->nr_rt_entries || 7471 hlist_empty(&irq_rt->map[guest_irq])) { 7472 pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", 7473 guest_irq, irq_rt->nr_rt_entries); 7474 goto out; 7475 } 7476 7477 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { 7478 if (e->type != KVM_IRQ_ROUTING_MSI) 7479 continue; 7480 /* 7481 * VT-d PI cannot support posting multicast/broadcast 7482 * interrupts to a vCPU, we still use interrupt remapping 7483 * for these kind of interrupts. 7484 * 7485 * For lowest-priority interrupts, we only support 7486 * those with single CPU as the destination, e.g. user 7487 * configures the interrupts via /proc/irq or uses 7488 * irqbalance to make the interrupts single-CPU. 7489 * 7490 * We will support full lowest-priority interrupt later. 7491 * 7492 * In addition, we can only inject generic interrupts using 7493 * the PI mechanism, refuse to route others through it. 7494 */ 7495 7496 kvm_set_msi_irq(kvm, e, &irq); 7497 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) || 7498 !kvm_irq_is_postable(&irq)) { 7499 /* 7500 * Make sure the IRTE is in remapped mode if 7501 * we don't handle it in posted mode. 
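 * (irq_set_vcpu_affinity(host_irq, NULL) asks the IOMMU driver to
 * program a plain remapped IRTE, while passing a vcpu_data, as done
 * below for the postable case, switches the IRTE to posted mode
 * targeting that vCPU's posted-interrupt descriptor.)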
7502 */ 7503 ret = irq_set_vcpu_affinity(host_irq, NULL); 7504 if (ret < 0) { 7505 printk(KERN_INFO 7506 "failed to back to remapped mode, irq: %u\n", 7507 host_irq); 7508 goto out; 7509 } 7510 7511 continue; 7512 } 7513 7514 vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); 7515 vcpu_info.vector = irq.vector; 7516 7517 trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, 7518 vcpu_info.vector, vcpu_info.pi_desc_addr, set); 7519 7520 if (set) 7521 ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); 7522 else 7523 ret = irq_set_vcpu_affinity(host_irq, NULL); 7524 7525 if (ret < 0) { 7526 printk(KERN_INFO "%s: failed to update PI IRTE\n", 7527 __func__); 7528 goto out; 7529 } 7530 } 7531 7532 ret = 0; 7533 out: 7534 srcu_read_unlock(&kvm->irq_srcu, idx); 7535 return ret; 7536 } 7537 7538 static void vmx_setup_mce(struct kvm_vcpu *vcpu) 7539 { 7540 if (vcpu->arch.mcg_cap & MCG_LMCE_P) 7541 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= 7542 FEATURE_CONTROL_LMCE; 7543 else 7544 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= 7545 ~FEATURE_CONTROL_LMCE; 7546 } 7547 7548 static int vmx_smi_allowed(struct kvm_vcpu *vcpu) 7549 { 7550 /* we need a nested vmexit to enter SMM, postpone if run is pending */ 7551 if (to_vmx(vcpu)->nested.nested_run_pending) 7552 return 0; 7553 return 1; 7554 } 7555 7556 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate) 7557 { 7558 struct vcpu_vmx *vmx = to_vmx(vcpu); 7559 7560 vmx->nested.smm.guest_mode = is_guest_mode(vcpu); 7561 if (vmx->nested.smm.guest_mode) 7562 nested_vmx_vmexit(vcpu, -1, 0, 0); 7563 7564 vmx->nested.smm.vmxon = vmx->nested.vmxon; 7565 vmx->nested.vmxon = false; 7566 vmx_clear_hlt(vcpu); 7567 return 0; 7568 } 7569 7570 static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) 7571 { 7572 struct vcpu_vmx *vmx = to_vmx(vcpu); 7573 int ret; 7574 7575 if (vmx->nested.smm.vmxon) { 7576 vmx->nested.vmxon = true; 7577 vmx->nested.smm.vmxon = false; 7578 } 7579 7580 if (vmx->nested.smm.guest_mode) { 7581 ret = nested_vmx_enter_non_root_mode(vcpu, false); 7582 if (ret) 7583 return ret; 7584 7585 vmx->nested.smm.guest_mode = false; 7586 } 7587 return 0; 7588 } 7589 7590 static int enable_smi_window(struct kvm_vcpu *vcpu) 7591 { 7592 return 0; 7593 } 7594 7595 static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu) 7596 { 7597 return false; 7598 } 7599 7600 static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu) 7601 { 7602 return to_vmx(vcpu)->nested.vmxon; 7603 } 7604 7605 static __init int hardware_setup(void) 7606 { 7607 unsigned long host_bndcfgs; 7608 struct desc_ptr dt; 7609 int r, i; 7610 7611 rdmsrl_safe(MSR_EFER, &host_efer); 7612 7613 store_idt(&dt); 7614 host_idt_base = dt.address; 7615 7616 for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) 7617 kvm_define_shared_msr(i, vmx_msr_index[i]); 7618 7619 if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0) 7620 return -EIO; 7621 7622 if (boot_cpu_has(X86_FEATURE_NX)) 7623 kvm_enable_efer_bits(EFER_NX); 7624 7625 if (boot_cpu_has(X86_FEATURE_MPX)) { 7626 rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs); 7627 WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost"); 7628 } 7629 7630 if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || 7631 !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) 7632 enable_vpid = 0; 7633 7634 if (!cpu_has_vmx_ept() || 7635 !cpu_has_vmx_ept_4levels() || 7636 !cpu_has_vmx_ept_mt_wb() || 7637 !cpu_has_vmx_invept_global()) 7638 enable_ept = 0; 7639 7640 if (!cpu_has_vmx_ept_ad_bits() || 
!enable_ept) 7641 enable_ept_ad_bits = 0; 7642 7643 if (!cpu_has_vmx_unrestricted_guest() || !enable_ept) 7644 enable_unrestricted_guest = 0; 7645 7646 if (!cpu_has_vmx_flexpriority()) 7647 flexpriority_enabled = 0; 7648 7649 if (!cpu_has_virtual_nmis()) 7650 enable_vnmi = 0; 7651 7652 /* 7653 * set_apic_access_page_addr() is used to reload apic access 7654 * page upon invalidation. No need to do anything if not 7655 * using the APIC_ACCESS_ADDR VMCS field. 7656 */ 7657 if (!flexpriority_enabled) 7658 kvm_x86_ops->set_apic_access_page_addr = NULL; 7659 7660 if (!cpu_has_vmx_tpr_shadow()) 7661 kvm_x86_ops->update_cr8_intercept = NULL; 7662 7663 if (enable_ept && !cpu_has_vmx_ept_2m_page()) 7664 kvm_disable_largepages(); 7665 7666 #if IS_ENABLED(CONFIG_HYPERV) 7667 if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH 7668 && enable_ept) { 7669 kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb; 7670 kvm_x86_ops->tlb_remote_flush_with_range = 7671 hv_remote_flush_tlb_with_range; 7672 } 7673 #endif 7674 7675 if (!cpu_has_vmx_ple()) { 7676 ple_gap = 0; 7677 ple_window = 0; 7678 ple_window_grow = 0; 7679 ple_window_max = 0; 7680 ple_window_shrink = 0; 7681 } 7682 7683 if (!cpu_has_vmx_apicv()) { 7684 enable_apicv = 0; 7685 kvm_x86_ops->sync_pir_to_irr = NULL; 7686 } 7687 7688 if (cpu_has_vmx_tsc_scaling()) { 7689 kvm_has_tsc_control = true; 7690 kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; 7691 kvm_tsc_scaling_ratio_frac_bits = 48; 7692 } 7693 7694 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ 7695 7696 if (enable_ept) 7697 vmx_enable_tdp(); 7698 else 7699 kvm_disable_tdp(); 7700 7701 /* 7702 * Only enable PML when hardware supports PML feature, and both EPT 7703 * and EPT A/D bit features are enabled -- PML depends on them to work. 7704 */ 7705 if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) 7706 enable_pml = 0; 7707 7708 if (!enable_pml) { 7709 kvm_x86_ops->slot_enable_log_dirty = NULL; 7710 kvm_x86_ops->slot_disable_log_dirty = NULL; 7711 kvm_x86_ops->flush_log_dirty = NULL; 7712 kvm_x86_ops->enable_log_dirty_pt_masked = NULL; 7713 } 7714 7715 if (!cpu_has_vmx_preemption_timer()) 7716 enable_preemption_timer = false; 7717 7718 if (enable_preemption_timer) { 7719 u64 use_timer_freq = 5000ULL * 1000 * 1000; 7720 u64 vmx_msr; 7721 7722 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); 7723 cpu_preemption_timer_multi = 7724 vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; 7725 7726 if (tsc_khz) 7727 use_timer_freq = (u64)tsc_khz * 1000; 7728 use_timer_freq >>= cpu_preemption_timer_multi; 7729 7730 /* 7731 * KVM "disables" the preemption timer by setting it to its max 7732 * value. Don't use the timer if it might cause spurious exits 7733 * at a rate faster than 0.1 Hz (of uninterrupted guest time). 
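 * The VMX-preemption timer value is a 32-bit field, so the longest period KVM can program is roughly 2^32 / use_timer_freq seconds; keeping that at or above 10 seconds (0.1 Hz) requires use_timer_freq <= 0xffffffff / 10, which is the check below.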
7734 */ 7735 if (use_timer_freq > 0xffffffffu / 10) 7736 enable_preemption_timer = false; 7737 } 7738 7739 if (!enable_preemption_timer) { 7740 kvm_x86_ops->set_hv_timer = NULL; 7741 kvm_x86_ops->cancel_hv_timer = NULL; 7742 kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit; 7743 } 7744 7745 kvm_set_posted_intr_wakeup_handler(wakeup_handler); 7746 7747 kvm_mce_cap_supported |= MCG_LMCE_P; 7748 7749 if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST) 7750 return -EINVAL; 7751 if (!enable_ept || !cpu_has_vmx_intel_pt()) 7752 pt_mode = PT_MODE_SYSTEM; 7753 7754 if (nested) { 7755 nested_vmx_setup_ctls_msrs(&vmcs_config.nested, 7756 vmx_capability.ept, enable_apicv); 7757 7758 r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers); 7759 if (r) 7760 return r; 7761 } 7762 7763 r = alloc_kvm_area(); 7764 if (r) 7765 nested_vmx_hardware_unsetup(); 7766 return r; 7767 } 7768 7769 static __exit void hardware_unsetup(void) 7770 { 7771 if (nested) 7772 nested_vmx_hardware_unsetup(); 7773 7774 free_kvm_area(); 7775 } 7776 7777 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { 7778 .cpu_has_kvm_support = cpu_has_kvm_support, 7779 .disabled_by_bios = vmx_disabled_by_bios, 7780 .hardware_setup = hardware_setup, 7781 .hardware_unsetup = hardware_unsetup, 7782 .check_processor_compatibility = vmx_check_processor_compat, 7783 .hardware_enable = hardware_enable, 7784 .hardware_disable = hardware_disable, 7785 .cpu_has_accelerated_tpr = report_flexpriority, 7786 .has_emulated_msr = vmx_has_emulated_msr, 7787 7788 .vm_init = vmx_vm_init, 7789 .vm_alloc = vmx_vm_alloc, 7790 .vm_free = vmx_vm_free, 7791 7792 .vcpu_create = vmx_create_vcpu, 7793 .vcpu_free = vmx_free_vcpu, 7794 .vcpu_reset = vmx_vcpu_reset, 7795 7796 .prepare_guest_switch = vmx_prepare_switch_to_guest, 7797 .vcpu_load = vmx_vcpu_load, 7798 .vcpu_put = vmx_vcpu_put, 7799 7800 .update_bp_intercept = update_exception_bitmap, 7801 .get_msr_feature = vmx_get_msr_feature, 7802 .get_msr = vmx_get_msr, 7803 .set_msr = vmx_set_msr, 7804 .get_segment_base = vmx_get_segment_base, 7805 .get_segment = vmx_get_segment, 7806 .set_segment = vmx_set_segment, 7807 .get_cpl = vmx_get_cpl, 7808 .get_cs_db_l_bits = vmx_get_cs_db_l_bits, 7809 .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, 7810 .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, 7811 .set_cr0 = vmx_set_cr0, 7812 .set_cr3 = vmx_set_cr3, 7813 .set_cr4 = vmx_set_cr4, 7814 .set_efer = vmx_set_efer, 7815 .get_idt = vmx_get_idt, 7816 .set_idt = vmx_set_idt, 7817 .get_gdt = vmx_get_gdt, 7818 .set_gdt = vmx_set_gdt, 7819 .get_dr6 = vmx_get_dr6, 7820 .set_dr6 = vmx_set_dr6, 7821 .set_dr7 = vmx_set_dr7, 7822 .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, 7823 .cache_reg = vmx_cache_reg, 7824 .get_rflags = vmx_get_rflags, 7825 .set_rflags = vmx_set_rflags, 7826 7827 .tlb_flush = vmx_flush_tlb, 7828 .tlb_flush_gva = vmx_flush_tlb_gva, 7829 7830 .run = vmx_vcpu_run, 7831 .handle_exit = vmx_handle_exit, 7832 .skip_emulated_instruction = skip_emulated_instruction, 7833 .set_interrupt_shadow = vmx_set_interrupt_shadow, 7834 .get_interrupt_shadow = vmx_get_interrupt_shadow, 7835 .patch_hypercall = vmx_patch_hypercall, 7836 .set_irq = vmx_inject_irq, 7837 .set_nmi = vmx_inject_nmi, 7838 .queue_exception = vmx_queue_exception, 7839 .cancel_injection = vmx_cancel_injection, 7840 .interrupt_allowed = vmx_interrupt_allowed, 7841 .nmi_allowed = vmx_nmi_allowed, 7842 .get_nmi_mask = vmx_get_nmi_mask, 7843 .set_nmi_mask = vmx_set_nmi_mask, 7844 .enable_nmi_window = 
enable_nmi_window, 7845 .enable_irq_window = enable_irq_window, 7846 .update_cr8_intercept = update_cr8_intercept, 7847 .set_virtual_apic_mode = vmx_set_virtual_apic_mode, 7848 .set_apic_access_page_addr = vmx_set_apic_access_page_addr, 7849 .get_enable_apicv = vmx_get_enable_apicv, 7850 .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, 7851 .load_eoi_exitmap = vmx_load_eoi_exitmap, 7852 .apicv_post_state_restore = vmx_apicv_post_state_restore, 7853 .hwapic_irr_update = vmx_hwapic_irr_update, 7854 .hwapic_isr_update = vmx_hwapic_isr_update, 7855 .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, 7856 .sync_pir_to_irr = vmx_sync_pir_to_irr, 7857 .deliver_posted_interrupt = vmx_deliver_posted_interrupt, 7858 .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt, 7859 7860 .set_tss_addr = vmx_set_tss_addr, 7861 .set_identity_map_addr = vmx_set_identity_map_addr, 7862 .get_tdp_level = get_ept_level, 7863 .get_mt_mask = vmx_get_mt_mask, 7864 7865 .get_exit_info = vmx_get_exit_info, 7866 7867 .get_lpage_level = vmx_get_lpage_level, 7868 7869 .cpuid_update = vmx_cpuid_update, 7870 7871 .rdtscp_supported = vmx_rdtscp_supported, 7872 .invpcid_supported = vmx_invpcid_supported, 7873 7874 .set_supported_cpuid = vmx_set_supported_cpuid, 7875 7876 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, 7877 7878 .read_l1_tsc_offset = vmx_read_l1_tsc_offset, 7879 .write_l1_tsc_offset = vmx_write_l1_tsc_offset, 7880 7881 .set_tdp_cr3 = vmx_set_cr3, 7882 7883 .check_intercept = vmx_check_intercept, 7884 .handle_exit_irqoff = vmx_handle_exit_irqoff, 7885 .mpx_supported = vmx_mpx_supported, 7886 .xsaves_supported = vmx_xsaves_supported, 7887 .umip_emulated = vmx_umip_emulated, 7888 .pt_supported = vmx_pt_supported, 7889 7890 .request_immediate_exit = vmx_request_immediate_exit, 7891 7892 .sched_in = vmx_sched_in, 7893 7894 .slot_enable_log_dirty = vmx_slot_enable_log_dirty, 7895 .slot_disable_log_dirty = vmx_slot_disable_log_dirty, 7896 .flush_log_dirty = vmx_flush_log_dirty, 7897 .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked, 7898 .write_log_dirty = vmx_write_pml_buffer, 7899 7900 .pre_block = vmx_pre_block, 7901 .post_block = vmx_post_block, 7902 7903 .pmu_ops = &intel_pmu_ops, 7904 7905 .update_pi_irte = vmx_update_pi_irte, 7906 7907 #ifdef CONFIG_X86_64 7908 .set_hv_timer = vmx_set_hv_timer, 7909 .cancel_hv_timer = vmx_cancel_hv_timer, 7910 #endif 7911 7912 .setup_mce = vmx_setup_mce, 7913 7914 .smi_allowed = vmx_smi_allowed, 7915 .pre_enter_smm = vmx_pre_enter_smm, 7916 .pre_leave_smm = vmx_pre_leave_smm, 7917 .enable_smi_window = enable_smi_window, 7918 7919 .check_nested_events = NULL, 7920 .get_nested_state = NULL, 7921 .set_nested_state = NULL, 7922 .get_vmcs12_pages = NULL, 7923 .nested_enable_evmcs = NULL, 7924 .nested_get_evmcs_version = NULL, 7925 .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault, 7926 .apic_init_signal_blocked = vmx_apic_init_signal_blocked, 7927 }; 7928 7929 static void vmx_cleanup_l1d_flush(void) 7930 { 7931 if (vmx_l1d_flush_pages) { 7932 free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); 7933 vmx_l1d_flush_pages = NULL; 7934 } 7935 /* Restore state so sysfs ignores VMX */ 7936 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 7937 } 7938 7939 static void vmx_exit(void) 7940 { 7941 #ifdef CONFIG_KEXEC_CORE 7942 RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); 7943 synchronize_rcu(); 7944 #endif 7945 7946 kvm_exit(); 7947 7948 #if IS_ENABLED(CONFIG_HYPERV) 7949 if (static_branch_unlikely(&enable_evmcs)) { 7950 int 
cpu; 7951 struct hv_vp_assist_page *vp_ap; 7952 /* 7953 * Reset everything to support using non-enlightened VMCS 7954 * access later (e.g. when we reload the module with 7955 * enlightened_vmcs=0) 7956 */ 7957 for_each_online_cpu(cpu) { 7958 vp_ap = hv_get_vp_assist_page(cpu); 7959 7960 if (!vp_ap) 7961 continue; 7962 7963 vp_ap->nested_control.features.directhypercall = 0; 7964 vp_ap->current_nested_vmcs = 0; 7965 vp_ap->enlighten_vmentry = 0; 7966 } 7967 7968 static_branch_disable(&enable_evmcs); 7969 } 7970 #endif 7971 vmx_cleanup_l1d_flush(); 7972 } 7973 module_exit(vmx_exit); 7974 7975 static int __init vmx_init(void) 7976 { 7977 int r; 7978 7979 #if IS_ENABLED(CONFIG_HYPERV) 7980 /* 7981 * Enlightened VMCS usage should be recommended and the host needs 7982 * to support eVMCS v1 or above. We can also disable eVMCS support 7983 * with module parameter. 7984 */ 7985 if (enlightened_vmcs && 7986 ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED && 7987 (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >= 7988 KVM_EVMCS_VERSION) { 7989 int cpu; 7990 7991 /* Check that we have assist pages on all online CPUs */ 7992 for_each_online_cpu(cpu) { 7993 if (!hv_get_vp_assist_page(cpu)) { 7994 enlightened_vmcs = false; 7995 break; 7996 } 7997 } 7998 7999 if (enlightened_vmcs) { 8000 pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n"); 8001 static_branch_enable(&enable_evmcs); 8002 } 8003 8004 if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) 8005 vmx_x86_ops.enable_direct_tlbflush 8006 = hv_enable_direct_tlbflush; 8007 8008 } else { 8009 enlightened_vmcs = false; 8010 } 8011 #endif 8012 8013 r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), 8014 __alignof__(struct vcpu_vmx), THIS_MODULE); 8015 if (r) 8016 return r; 8017 8018 /* 8019 * Must be called after kvm_init() so enable_ept is properly set 8020 * up. Hand the parameter mitigation value in which was stored in 8021 * the pre module init parser. If no parameter was given, it will 8022 * contain 'auto' which will be turned into the default 'cond' 8023 * mitigation mode. 8024 */ 8025 r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); 8026 if (r) { 8027 vmx_exit(); 8028 return r; 8029 } 8030 8031 #ifdef CONFIG_KEXEC_CORE 8032 rcu_assign_pointer(crash_vmclear_loaded_vmcss, 8033 crash_vmclear_local_loaded_vmcss); 8034 #endif 8035 vmx_check_vmcs12_offsets(); 8036 8037 return 0; 8038 } 8039 module_init(vmx_init); 8040