1 /* 2 * Kernel-based Virtual Machine driver for Linux 3 * 4 * This module enables machines with Intel VT-x extensions to run virtual 5 * machines without emulation or binary translation. 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 9 * 10 * Authors: 11 * Avi Kivity <avi@qumranet.com> 12 * Yaniv Kamay <yaniv@qumranet.com> 13 * 14 * This work is licensed under the terms of the GNU GPL, version 2. See 15 * the COPYING file in the top-level directory. 16 * 17 */ 18 19 #include <linux/frame.h> 20 #include <linux/highmem.h> 21 #include <linux/hrtimer.h> 22 #include <linux/kernel.h> 23 #include <linux/kvm_host.h> 24 #include <linux/module.h> 25 #include <linux/moduleparam.h> 26 #include <linux/mod_devicetable.h> 27 #include <linux/mm.h> 28 #include <linux/sched.h> 29 #include <linux/sched/smt.h> 30 #include <linux/slab.h> 31 #include <linux/tboot.h> 32 #include <linux/trace_events.h> 33 34 #include <asm/apic.h> 35 #include <asm/asm.h> 36 #include <asm/cpu.h> 37 #include <asm/debugreg.h> 38 #include <asm/desc.h> 39 #include <asm/fpu/internal.h> 40 #include <asm/io.h> 41 #include <asm/irq_remapping.h> 42 #include <asm/kexec.h> 43 #include <asm/perf_event.h> 44 #include <asm/mce.h> 45 #include <asm/mmu_context.h> 46 #include <asm/mshyperv.h> 47 #include <asm/spec-ctrl.h> 48 #include <asm/virtext.h> 49 #include <asm/vmx.h> 50 51 #include "capabilities.h" 52 #include "cpuid.h" 53 #include "evmcs.h" 54 #include "irq.h" 55 #include "kvm_cache_regs.h" 56 #include "lapic.h" 57 #include "mmu.h" 58 #include "nested.h" 59 #include "ops.h" 60 #include "pmu.h" 61 #include "trace.h" 62 #include "vmcs.h" 63 #include "vmcs12.h" 64 #include "vmx.h" 65 #include "x86.h" 66 67 MODULE_AUTHOR("Qumranet"); 68 MODULE_LICENSE("GPL"); 69 70 static const struct x86_cpu_id vmx_cpu_id[] = { 71 X86_FEATURE_MATCH(X86_FEATURE_VMX), 72 {} 73 }; 74 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); 75 76 bool __read_mostly enable_vpid = 1; 77 module_param_named(vpid, enable_vpid, bool, 0444); 78 79 static bool __read_mostly enable_vnmi = 1; 80 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO); 81 82 bool __read_mostly flexpriority_enabled = 1; 83 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); 84 85 bool __read_mostly enable_ept = 1; 86 module_param_named(ept, enable_ept, bool, S_IRUGO); 87 88 bool __read_mostly enable_unrestricted_guest = 1; 89 module_param_named(unrestricted_guest, 90 enable_unrestricted_guest, bool, S_IRUGO); 91 92 bool __read_mostly enable_ept_ad_bits = 1; 93 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); 94 95 static bool __read_mostly emulate_invalid_guest_state = true; 96 module_param(emulate_invalid_guest_state, bool, S_IRUGO); 97 98 static bool __read_mostly fasteoi = 1; 99 module_param(fasteoi, bool, S_IRUGO); 100 101 static bool __read_mostly enable_apicv = 1; 102 module_param(enable_apicv, bool, S_IRUGO); 103 104 /* 105 * If nested=1, nested virtualization is supported, i.e., guests may use 106 * VMX and be a hypervisor for its own guests. If nested=0, guests may not 107 * use VMX instructions. 
 */
static bool __read_mostly nested = 1;
module_param(nested, bool, S_IRUGO);

static u64 __read_mostly host_xss;

bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);

#define MSR_BITMAP_MODE_X2APIC		1
#define MSR_BITMAP_MODE_X2APIC_APICV	2

#define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL

/* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
static int __read_mostly cpu_preemption_timer_multi;
static bool __read_mostly enable_preemption_timer = 1;
#ifdef CONFIG_X86_64
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif

#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON				\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |	\
	 X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS				      \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)

#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
	RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
	RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
	RTIT_STATUS_BYTECNT))

#define MSR_IA32_RTIT_OUTPUT_BASE_MASK \
	(~((1UL << cpuid_query_maxphyaddr(vcpu)) - 1) | 0x7f)

/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled. Testing shows this time is usually smaller than 128
 *             cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held
 *             for less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC;
 * refer to SDM volume 3b, sections 21.6.13 & 22.1.3.
 */
static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
module_param(ple_gap, uint, 0444);

static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, uint, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(ple_window_grow, uint, 0444);

/* Default resets per-vcpu window every exit to ple_window. */
static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(ple_window_shrink, uint, 0444);

/* Default is to compute the maximum so we can never overflow.
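 *
 * ple_window is a uint and repeated growth by ple_window_grow would
 * eventually wrap, so grown values are clamped to ple_window_max.  A
 * minimal sketch of such a clamp (illustrative only, not the actual
 * helper used here):
 *
 *	unsigned int new = old * ple_window_grow;
 *
 *	if (new < old || new > ple_window_max)	/* wrapped or too large */
 *		new = ple_window_max;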
*/ 178 static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; 179 module_param(ple_window_max, uint, 0444); 180 181 /* Default is SYSTEM mode, 1 for host-guest mode */ 182 int __read_mostly pt_mode = PT_MODE_SYSTEM; 183 module_param(pt_mode, int, S_IRUGO); 184 185 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); 186 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); 187 static DEFINE_MUTEX(vmx_l1d_flush_mutex); 188 189 /* Storage for pre module init parameter parsing */ 190 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO; 191 192 static const struct { 193 const char *option; 194 bool for_parse; 195 } vmentry_l1d_param[] = { 196 [VMENTER_L1D_FLUSH_AUTO] = {"auto", true}, 197 [VMENTER_L1D_FLUSH_NEVER] = {"never", true}, 198 [VMENTER_L1D_FLUSH_COND] = {"cond", true}, 199 [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true}, 200 [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false}, 201 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false}, 202 }; 203 204 #define L1D_CACHE_ORDER 4 205 static void *vmx_l1d_flush_pages; 206 207 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) 208 { 209 struct page *page; 210 unsigned int i; 211 212 if (!enable_ept) { 213 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED; 214 return 0; 215 } 216 217 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { 218 u64 msr; 219 220 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); 221 if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) { 222 l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; 223 return 0; 224 } 225 } 226 227 /* If set to auto use the default l1tf mitigation method */ 228 if (l1tf == VMENTER_L1D_FLUSH_AUTO) { 229 switch (l1tf_mitigation) { 230 case L1TF_MITIGATION_OFF: 231 l1tf = VMENTER_L1D_FLUSH_NEVER; 232 break; 233 case L1TF_MITIGATION_FLUSH_NOWARN: 234 case L1TF_MITIGATION_FLUSH: 235 case L1TF_MITIGATION_FLUSH_NOSMT: 236 l1tf = VMENTER_L1D_FLUSH_COND; 237 break; 238 case L1TF_MITIGATION_FULL: 239 case L1TF_MITIGATION_FULL_FORCE: 240 l1tf = VMENTER_L1D_FLUSH_ALWAYS; 241 break; 242 } 243 } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) { 244 l1tf = VMENTER_L1D_FLUSH_ALWAYS; 245 } 246 247 if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages && 248 !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) { 249 page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER); 250 if (!page) 251 return -ENOMEM; 252 vmx_l1d_flush_pages = page_address(page); 253 254 /* 255 * Initialize each page with a different pattern in 256 * order to protect against KSM in the nested 257 * virtualization case. 
		 */
		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
			       PAGE_SIZE);
		}
	}

	l1tf_vmx_mitigation = l1tf;

	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
		static_branch_enable(&vmx_l1d_should_flush);
	else
		static_branch_disable(&vmx_l1d_should_flush);

	if (l1tf == VMENTER_L1D_FLUSH_COND)
		static_branch_enable(&vmx_l1d_flush_cond);
	else
		static_branch_disable(&vmx_l1d_flush_cond);
	return 0;
}

static int vmentry_l1d_flush_parse(const char *s)
{
	unsigned int i;

	if (s) {
		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
			if (vmentry_l1d_param[i].for_parse &&
			    sysfs_streq(s, vmentry_l1d_param[i].option))
				return i;
		}
	}
	return -EINVAL;
}

static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
{
	int l1tf, ret;

	l1tf = vmentry_l1d_flush_parse(s);
	if (l1tf < 0)
		return l1tf;

	if (!boot_cpu_has(X86_BUG_L1TF))
		return 0;

	/*
	 * Has vmx_init() run already? If not then this is the pre init
	 * parameter parsing. In that case just store the value and let
	 * vmx_init() do the proper setup after enable_ept has been
	 * established.
	 */
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
		vmentry_l1d_flush_param = l1tf;
		return 0;
	}

	mutex_lock(&vmx_l1d_flush_mutex);
	ret = vmx_setup_l1d_flush(l1tf);
	mutex_unlock(&vmx_l1d_flush_mutex);
	return ret;
}

static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
{
	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
		return sprintf(s, "???\n");

	return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
}

static const struct kernel_param_ops vmentry_l1d_flush_ops = {
	.set = vmentry_l1d_flush_set,
	.get = vmentry_l1d_flush_get,
};
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);

static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
							  u32 msr, int type);

void vmx_vmexit(void);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);

/*
 * We maintain a per-CPU linked-list of vCPUs, so in wakeup_handler() we
 * can find which vCPU should be woken up.
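 *
 * Illustrative sketch of how such a list is consumed (assumed shape of
 * the wakeup handler, not a verbatim copy of it):
 *
 *	spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
 *	list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
 *			    blocked_vcpu_list)
 *		if (pi_test_on(vcpu_to_pi_desc(vcpu)))
 *			kvm_vcpu_kick(vcpu);
 *	spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));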
353 */ 354 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); 355 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); 356 357 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); 358 static DEFINE_SPINLOCK(vmx_vpid_lock); 359 360 struct vmcs_config vmcs_config; 361 struct vmx_capability vmx_capability; 362 363 #define VMX_SEGMENT_FIELD(seg) \ 364 [VCPU_SREG_##seg] = { \ 365 .selector = GUEST_##seg##_SELECTOR, \ 366 .base = GUEST_##seg##_BASE, \ 367 .limit = GUEST_##seg##_LIMIT, \ 368 .ar_bytes = GUEST_##seg##_AR_BYTES, \ 369 } 370 371 static const struct kvm_vmx_segment_field { 372 unsigned selector; 373 unsigned base; 374 unsigned limit; 375 unsigned ar_bytes; 376 } kvm_vmx_segment_fields[] = { 377 VMX_SEGMENT_FIELD(CS), 378 VMX_SEGMENT_FIELD(DS), 379 VMX_SEGMENT_FIELD(ES), 380 VMX_SEGMENT_FIELD(FS), 381 VMX_SEGMENT_FIELD(GS), 382 VMX_SEGMENT_FIELD(SS), 383 VMX_SEGMENT_FIELD(TR), 384 VMX_SEGMENT_FIELD(LDTR), 385 }; 386 387 u64 host_efer; 388 389 /* 390 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm 391 * will emulate SYSCALL in legacy mode if the vendor string in guest 392 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To 393 * support this emulation, IA32_STAR must always be included in 394 * vmx_msr_index[], even in i386 builds. 395 */ 396 const u32 vmx_msr_index[] = { 397 #ifdef CONFIG_X86_64 398 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, 399 #endif 400 MSR_EFER, MSR_TSC_AUX, MSR_STAR, 401 }; 402 403 #if IS_ENABLED(CONFIG_HYPERV) 404 static bool __read_mostly enlightened_vmcs = true; 405 module_param(enlightened_vmcs, bool, 0444); 406 407 /* check_ept_pointer() should be under protection of ept_pointer_lock. */ 408 static void check_ept_pointer_match(struct kvm *kvm) 409 { 410 struct kvm_vcpu *vcpu; 411 u64 tmp_eptp = INVALID_PAGE; 412 int i; 413 414 kvm_for_each_vcpu(i, vcpu, kvm) { 415 if (!VALID_PAGE(tmp_eptp)) { 416 tmp_eptp = to_vmx(vcpu)->ept_pointer; 417 } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) { 418 to_kvm_vmx(kvm)->ept_pointers_match 419 = EPT_POINTERS_MISMATCH; 420 return; 421 } 422 } 423 424 to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; 425 } 426 427 static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, 428 void *data) 429 { 430 struct kvm_tlb_range *range = data; 431 432 return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn, 433 range->pages); 434 } 435 436 static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm, 437 struct kvm_vcpu *vcpu, struct kvm_tlb_range *range) 438 { 439 u64 ept_pointer = to_vmx(vcpu)->ept_pointer; 440 441 /* 442 * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address 443 * of the base of EPT PML4 table, strip off EPT configuration 444 * information. 445 */ 446 if (range) 447 return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK, 448 kvm_fill_hv_flush_list_func, (void *)range); 449 else 450 return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK); 451 } 452 453 static int hv_remote_flush_tlb_with_range(struct kvm *kvm, 454 struct kvm_tlb_range *range) 455 { 456 struct kvm_vcpu *vcpu; 457 int ret = 0, i; 458 459 spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); 460 461 if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK) 462 check_ept_pointer_match(kvm); 463 464 if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) { 465 kvm_for_each_vcpu(i, vcpu, kvm) { 466 /* If ept_pointer is invalid pointer, bypass flush request. 
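			 * (A vCPU whose EPT pointer has never been set up
			 * has not run with an EPTP yet, so there is nothing
			 * to flush for it.)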
*/ 467 if (VALID_PAGE(to_vmx(vcpu)->ept_pointer)) 468 ret |= __hv_remote_flush_tlb_with_range( 469 kvm, vcpu, range); 470 } 471 } else { 472 ret = __hv_remote_flush_tlb_with_range(kvm, 473 kvm_get_vcpu(kvm, 0), range); 474 } 475 476 spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); 477 return ret; 478 } 479 static int hv_remote_flush_tlb(struct kvm *kvm) 480 { 481 return hv_remote_flush_tlb_with_range(kvm, NULL); 482 } 483 484 #endif /* IS_ENABLED(CONFIG_HYPERV) */ 485 486 /* 487 * Comment's format: document - errata name - stepping - processor name. 488 * Refer from 489 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp 490 */ 491 static u32 vmx_preemption_cpu_tfms[] = { 492 /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */ 493 0x000206E6, 494 /* 323056.pdf - AAX65 - C2 - Xeon L3406 */ 495 /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */ 496 /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */ 497 0x00020652, 498 /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */ 499 0x00020655, 500 /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */ 501 /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */ 502 /* 503 * 320767.pdf - AAP86 - B1 - 504 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile 505 */ 506 0x000106E5, 507 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */ 508 0x000106A0, 509 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */ 510 0x000106A1, 511 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */ 512 0x000106A4, 513 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */ 514 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */ 515 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */ 516 0x000106A5, 517 /* Xeon E3-1220 V2 */ 518 0x000306A8, 519 }; 520 521 static inline bool cpu_has_broken_vmx_preemption_timer(void) 522 { 523 u32 eax = cpuid_eax(0x00000001), i; 524 525 /* Clear the reserved bits */ 526 eax &= ~(0x3U << 14 | 0xfU << 28); 527 for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++) 528 if (eax == vmx_preemption_cpu_tfms[i]) 529 return true; 530 531 return false; 532 } 533 534 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu) 535 { 536 return flexpriority_enabled && lapic_in_kernel(vcpu); 537 } 538 539 static inline bool report_flexpriority(void) 540 { 541 return flexpriority_enabled; 542 } 543 544 static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) 545 { 546 int i; 547 548 for (i = 0; i < vmx->nmsrs; ++i) 549 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) 550 return i; 551 return -1; 552 } 553 554 struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) 555 { 556 int i; 557 558 i = __find_msr_index(vmx, msr); 559 if (i >= 0) 560 return &vmx->guest_msrs[i]; 561 return NULL; 562 } 563 564 void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) 565 { 566 vmcs_clear(loaded_vmcs->vmcs); 567 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) 568 vmcs_clear(loaded_vmcs->shadow_vmcs); 569 loaded_vmcs->cpu = -1; 570 loaded_vmcs->launched = 0; 571 } 572 573 #ifdef CONFIG_KEXEC_CORE 574 /* 575 * This bitmap is used to indicate whether the vmclear 576 * operation is enabled on all cpus. All disabled by 577 * default. 
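 *
 * The bit for a CPU is cleared while that CPU is manipulating its
 * loaded_vmcss_on_cpu list (see the crash_disable_local_vmclear()
 * callers below), so the crash/kexec path never walks a list that is
 * in the middle of being updated.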
 */
static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;

static inline void crash_enable_local_vmclear(int cpu)
{
	cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline void crash_disable_local_vmclear(int cpu)
{
	cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline int crash_local_vmclear_enabled(int cpu)
{
	return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	if (!crash_local_vmclear_enabled(cpu))
		return;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#else
static inline void crash_enable_local_vmclear(int cpu) { }
static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC_CORE */

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	crash_disable_local_vmclear(cpu);
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * Ensure that the update of loaded_vmcs->loaded_vmcss_on_cpu_link
	 * happens before loaded_vmcs->cpu is set to -1, which is done in
	 * loaded_vmcs_init.  Otherwise another CPU could see cpu == -1
	 * first and add the vmcs to its per-CPU list before it has been
	 * deleted here.
	 */
	smp_wmb();

	loaded_vmcs_init(loaded_vmcs);
	crash_enable_local_vmclear(cpu);
}

void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}

static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
{
	bool ret;
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);

	if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
		vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
		vmx->segment_cache.bitmask = 0;
	}
	ret = vmx->segment_cache.bitmask & mask;
	vmx->segment_cache.bitmask |= mask;
	return ret;
}

static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}

static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
	ulong *p = &vmx->segment_cache.seg[seg].base;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
	return *p;
}

static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].limit;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
	return *p;
}

static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].ar;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
	return *p;
695 } 696 697 void update_exception_bitmap(struct kvm_vcpu *vcpu) 698 { 699 u32 eb; 700 701 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | 702 (1u << DB_VECTOR) | (1u << AC_VECTOR); 703 /* 704 * Guest access to VMware backdoor ports could legitimately 705 * trigger #GP because of TSS I/O permission bitmap. 706 * We intercept those #GP and allow access to them anyway 707 * as VMware does. 708 */ 709 if (enable_vmware_backdoor) 710 eb |= (1u << GP_VECTOR); 711 if ((vcpu->guest_debug & 712 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == 713 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) 714 eb |= 1u << BP_VECTOR; 715 if (to_vmx(vcpu)->rmode.vm86_active) 716 eb = ~0; 717 if (enable_ept) 718 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ 719 720 /* When we are running a nested L2 guest and L1 specified for it a 721 * certain exception bitmap, we must trap the same exceptions and pass 722 * them to L1. When running L2, we will only handle the exceptions 723 * specified above if L1 did not want them. 724 */ 725 if (is_guest_mode(vcpu)) 726 eb |= get_vmcs12(vcpu)->exception_bitmap; 727 728 vmcs_write32(EXCEPTION_BITMAP, eb); 729 } 730 731 /* 732 * Check if MSR is intercepted for currently loaded MSR bitmap. 733 */ 734 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) 735 { 736 unsigned long *msr_bitmap; 737 int f = sizeof(unsigned long); 738 739 if (!cpu_has_vmx_msr_bitmap()) 740 return true; 741 742 msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap; 743 744 if (msr <= 0x1fff) { 745 return !!test_bit(msr, msr_bitmap + 0x800 / f); 746 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 747 msr &= 0x1fff; 748 return !!test_bit(msr, msr_bitmap + 0xc00 / f); 749 } 750 751 return true; 752 } 753 754 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, 755 unsigned long entry, unsigned long exit) 756 { 757 vm_entry_controls_clearbit(vmx, entry); 758 vm_exit_controls_clearbit(vmx, exit); 759 } 760 761 static int find_msr(struct vmx_msrs *m, unsigned int msr) 762 { 763 unsigned int i; 764 765 for (i = 0; i < m->nr; ++i) { 766 if (m->val[i].index == msr) 767 return i; 768 } 769 return -ENOENT; 770 } 771 772 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) 773 { 774 int i; 775 struct msr_autoload *m = &vmx->msr_autoload; 776 777 switch (msr) { 778 case MSR_EFER: 779 if (cpu_has_load_ia32_efer()) { 780 clear_atomic_switch_msr_special(vmx, 781 VM_ENTRY_LOAD_IA32_EFER, 782 VM_EXIT_LOAD_IA32_EFER); 783 return; 784 } 785 break; 786 case MSR_CORE_PERF_GLOBAL_CTRL: 787 if (cpu_has_load_perf_global_ctrl()) { 788 clear_atomic_switch_msr_special(vmx, 789 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 790 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); 791 return; 792 } 793 break; 794 } 795 i = find_msr(&m->guest, msr); 796 if (i < 0) 797 goto skip_guest; 798 --m->guest.nr; 799 m->guest.val[i] = m->guest.val[m->guest.nr]; 800 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); 801 802 skip_guest: 803 i = find_msr(&m->host, msr); 804 if (i < 0) 805 return; 806 807 --m->host.nr; 808 m->host.val[i] = m->host.val[m->host.nr]; 809 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); 810 } 811 812 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, 813 unsigned long entry, unsigned long exit, 814 unsigned long guest_val_vmcs, unsigned long host_val_vmcs, 815 u64 guest_val, u64 host_val) 816 { 817 vmcs_write64(guest_val_vmcs, guest_val); 818 if (host_val_vmcs != HOST_IA32_EFER) 819 vmcs_write64(host_val_vmcs, host_val); 820 vm_entry_controls_setbit(vmx, 
entry); 821 vm_exit_controls_setbit(vmx, exit); 822 } 823 824 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, 825 u64 guest_val, u64 host_val, bool entry_only) 826 { 827 int i, j = 0; 828 struct msr_autoload *m = &vmx->msr_autoload; 829 830 switch (msr) { 831 case MSR_EFER: 832 if (cpu_has_load_ia32_efer()) { 833 add_atomic_switch_msr_special(vmx, 834 VM_ENTRY_LOAD_IA32_EFER, 835 VM_EXIT_LOAD_IA32_EFER, 836 GUEST_IA32_EFER, 837 HOST_IA32_EFER, 838 guest_val, host_val); 839 return; 840 } 841 break; 842 case MSR_CORE_PERF_GLOBAL_CTRL: 843 if (cpu_has_load_perf_global_ctrl()) { 844 add_atomic_switch_msr_special(vmx, 845 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 846 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 847 GUEST_IA32_PERF_GLOBAL_CTRL, 848 HOST_IA32_PERF_GLOBAL_CTRL, 849 guest_val, host_val); 850 return; 851 } 852 break; 853 case MSR_IA32_PEBS_ENABLE: 854 /* PEBS needs a quiescent period after being disabled (to write 855 * a record). Disabling PEBS through VMX MSR swapping doesn't 856 * provide that period, so a CPU could write host's record into 857 * guest's memory. 858 */ 859 wrmsrl(MSR_IA32_PEBS_ENABLE, 0); 860 } 861 862 i = find_msr(&m->guest, msr); 863 if (!entry_only) 864 j = find_msr(&m->host, msr); 865 866 if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { 867 printk_once(KERN_WARNING "Not enough msr switch entries. " 868 "Can't add msr %x\n", msr); 869 return; 870 } 871 if (i < 0) { 872 i = m->guest.nr++; 873 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); 874 } 875 m->guest.val[i].index = msr; 876 m->guest.val[i].value = guest_val; 877 878 if (entry_only) 879 return; 880 881 if (j < 0) { 882 j = m->host.nr++; 883 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); 884 } 885 m->host.val[j].index = msr; 886 m->host.val[j].value = host_val; 887 } 888 889 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) 890 { 891 u64 guest_efer = vmx->vcpu.arch.efer; 892 u64 ignore_bits = 0; 893 894 if (!enable_ept) { 895 /* 896 * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing 897 * host CPUID is more efficient than testing guest CPUID 898 * or CR4. Host SMEP is anyway a requirement for guest SMEP. 899 */ 900 if (boot_cpu_has(X86_FEATURE_SMEP)) 901 guest_efer |= EFER_NX; 902 else if (!(guest_efer & EFER_NX)) 903 ignore_bits |= EFER_NX; 904 } 905 906 /* 907 * LMA and LME handled by hardware; SCE meaningless outside long mode. 908 */ 909 ignore_bits |= EFER_SCE; 910 #ifdef CONFIG_X86_64 911 ignore_bits |= EFER_LMA | EFER_LME; 912 /* SCE is meaningful only in long mode on Intel */ 913 if (guest_efer & EFER_LMA) 914 ignore_bits &= ~(u64)EFER_SCE; 915 #endif 916 917 /* 918 * On EPT, we can't emulate NX, so we must switch EFER atomically. 919 * On CPUs that support "load IA32_EFER", always switch EFER 920 * atomically, since it's faster than switching it manually. 
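	 * Otherwise EFER is handled through the shared-MSR machinery
	 * below, with the bits in ignore_bits taken from the host value.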
921 */ 922 if (cpu_has_load_ia32_efer() || 923 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { 924 if (!(guest_efer & EFER_LMA)) 925 guest_efer &= ~EFER_LME; 926 if (guest_efer != host_efer) 927 add_atomic_switch_msr(vmx, MSR_EFER, 928 guest_efer, host_efer, false); 929 else 930 clear_atomic_switch_msr(vmx, MSR_EFER); 931 return false; 932 } else { 933 clear_atomic_switch_msr(vmx, MSR_EFER); 934 935 guest_efer &= ~ignore_bits; 936 guest_efer |= host_efer & ignore_bits; 937 938 vmx->guest_msrs[efer_offset].data = guest_efer; 939 vmx->guest_msrs[efer_offset].mask = ~ignore_bits; 940 941 return true; 942 } 943 } 944 945 #ifdef CONFIG_X86_32 946 /* 947 * On 32-bit kernels, VM exits still load the FS and GS bases from the 948 * VMCS rather than the segment table. KVM uses this helper to figure 949 * out the current bases to poke them into the VMCS before entry. 950 */ 951 static unsigned long segment_base(u16 selector) 952 { 953 struct desc_struct *table; 954 unsigned long v; 955 956 if (!(selector & ~SEGMENT_RPL_MASK)) 957 return 0; 958 959 table = get_current_gdt_ro(); 960 961 if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) { 962 u16 ldt_selector = kvm_read_ldt(); 963 964 if (!(ldt_selector & ~SEGMENT_RPL_MASK)) 965 return 0; 966 967 table = (struct desc_struct *)segment_base(ldt_selector); 968 } 969 v = get_desc_base(&table[selector >> 3]); 970 return v; 971 } 972 #endif 973 974 static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range) 975 { 976 u32 i; 977 978 wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status); 979 wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); 980 wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); 981 wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); 982 for (i = 0; i < addr_range; i++) { 983 wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); 984 wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); 985 } 986 } 987 988 static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range) 989 { 990 u32 i; 991 992 rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status); 993 rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); 994 rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); 995 rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); 996 for (i = 0; i < addr_range; i++) { 997 rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); 998 rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); 999 } 1000 } 1001 1002 static void pt_guest_enter(struct vcpu_vmx *vmx) 1003 { 1004 if (pt_mode == PT_MODE_SYSTEM) 1005 return; 1006 1007 /* 1008 * GUEST_IA32_RTIT_CTL is already set in the VMCS. 1009 * Save host state before VM entry. 1010 */ 1011 rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); 1012 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { 1013 wrmsrl(MSR_IA32_RTIT_CTL, 0); 1014 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); 1015 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); 1016 } 1017 } 1018 1019 static void pt_guest_exit(struct vcpu_vmx *vmx) 1020 { 1021 if (pt_mode == PT_MODE_SYSTEM) 1022 return; 1023 1024 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { 1025 pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); 1026 pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); 1027 } 1028 1029 /* Reload host state (IA32_RTIT_CTL will be cleared on VM exit). 
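	 * Host tracing, if it was enabled, resumes only with this write,
	 * i.e. after the guest PT state has been saved above.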
*/ 1030 wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); 1031 } 1032 1033 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) 1034 { 1035 struct vcpu_vmx *vmx = to_vmx(vcpu); 1036 struct vmcs_host_state *host_state; 1037 #ifdef CONFIG_X86_64 1038 int cpu = raw_smp_processor_id(); 1039 #endif 1040 unsigned long fs_base, gs_base; 1041 u16 fs_sel, gs_sel; 1042 int i; 1043 1044 vmx->req_immediate_exit = false; 1045 1046 /* 1047 * Note that guest MSRs to be saved/restored can also be changed 1048 * when guest state is loaded. This happens when guest transitions 1049 * to/from long-mode by setting MSR_EFER.LMA. 1050 */ 1051 if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) { 1052 vmx->guest_msrs_dirty = false; 1053 for (i = 0; i < vmx->save_nmsrs; ++i) 1054 kvm_set_shared_msr(vmx->guest_msrs[i].index, 1055 vmx->guest_msrs[i].data, 1056 vmx->guest_msrs[i].mask); 1057 1058 } 1059 1060 if (vmx->loaded_cpu_state) 1061 return; 1062 1063 vmx->loaded_cpu_state = vmx->loaded_vmcs; 1064 host_state = &vmx->loaded_cpu_state->host_state; 1065 1066 /* 1067 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not 1068 * allow segment selectors with cpl > 0 or ti == 1. 1069 */ 1070 host_state->ldt_sel = kvm_read_ldt(); 1071 1072 #ifdef CONFIG_X86_64 1073 savesegment(ds, host_state->ds_sel); 1074 savesegment(es, host_state->es_sel); 1075 1076 gs_base = cpu_kernelmode_gs_base(cpu); 1077 if (likely(is_64bit_mm(current->mm))) { 1078 save_fsgs_for_kvm(); 1079 fs_sel = current->thread.fsindex; 1080 gs_sel = current->thread.gsindex; 1081 fs_base = current->thread.fsbase; 1082 vmx->msr_host_kernel_gs_base = current->thread.gsbase; 1083 } else { 1084 savesegment(fs, fs_sel); 1085 savesegment(gs, gs_sel); 1086 fs_base = read_msr(MSR_FS_BASE); 1087 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); 1088 } 1089 1090 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1091 #else 1092 savesegment(fs, fs_sel); 1093 savesegment(gs, gs_sel); 1094 fs_base = segment_base(fs_sel); 1095 gs_base = segment_base(gs_sel); 1096 #endif 1097 1098 if (unlikely(fs_sel != host_state->fs_sel)) { 1099 if (!(fs_sel & 7)) 1100 vmcs_write16(HOST_FS_SELECTOR, fs_sel); 1101 else 1102 vmcs_write16(HOST_FS_SELECTOR, 0); 1103 host_state->fs_sel = fs_sel; 1104 } 1105 if (unlikely(gs_sel != host_state->gs_sel)) { 1106 if (!(gs_sel & 7)) 1107 vmcs_write16(HOST_GS_SELECTOR, gs_sel); 1108 else 1109 vmcs_write16(HOST_GS_SELECTOR, 0); 1110 host_state->gs_sel = gs_sel; 1111 } 1112 if (unlikely(fs_base != host_state->fs_base)) { 1113 vmcs_writel(HOST_FS_BASE, fs_base); 1114 host_state->fs_base = fs_base; 1115 } 1116 if (unlikely(gs_base != host_state->gs_base)) { 1117 vmcs_writel(HOST_GS_BASE, gs_base); 1118 host_state->gs_base = gs_base; 1119 } 1120 } 1121 1122 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) 1123 { 1124 struct vmcs_host_state *host_state; 1125 1126 if (!vmx->loaded_cpu_state) 1127 return; 1128 1129 WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs); 1130 host_state = &vmx->loaded_cpu_state->host_state; 1131 1132 ++vmx->vcpu.stat.host_state_reload; 1133 vmx->loaded_cpu_state = NULL; 1134 1135 #ifdef CONFIG_X86_64 1136 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1137 #endif 1138 if (host_state->ldt_sel || (host_state->gs_sel & 7)) { 1139 kvm_load_ldt(host_state->ldt_sel); 1140 #ifdef CONFIG_X86_64 1141 load_gs_index(host_state->gs_sel); 1142 #else 1143 loadsegment(gs, host_state->gs_sel); 1144 #endif 1145 } 1146 if (host_state->fs_sel & 7) 1147 loadsegment(fs, host_state->fs_sel); 
1148 #ifdef CONFIG_X86_64 1149 if (unlikely(host_state->ds_sel | host_state->es_sel)) { 1150 loadsegment(ds, host_state->ds_sel); 1151 loadsegment(es, host_state->es_sel); 1152 } 1153 #endif 1154 invalidate_tss_limit(); 1155 #ifdef CONFIG_X86_64 1156 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); 1157 #endif 1158 load_fixmap_gdt(raw_smp_processor_id()); 1159 } 1160 1161 #ifdef CONFIG_X86_64 1162 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx) 1163 { 1164 preempt_disable(); 1165 if (vmx->loaded_cpu_state) 1166 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); 1167 preempt_enable(); 1168 return vmx->msr_guest_kernel_gs_base; 1169 } 1170 1171 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data) 1172 { 1173 preempt_disable(); 1174 if (vmx->loaded_cpu_state) 1175 wrmsrl(MSR_KERNEL_GS_BASE, data); 1176 preempt_enable(); 1177 vmx->msr_guest_kernel_gs_base = data; 1178 } 1179 #endif 1180 1181 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) 1182 { 1183 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 1184 struct pi_desc old, new; 1185 unsigned int dest; 1186 1187 /* 1188 * In case of hot-plug or hot-unplug, we may have to undo 1189 * vmx_vcpu_pi_put even if there is no assigned device. And we 1190 * always keep PI.NDST up to date for simplicity: it makes the 1191 * code easier, and CPU migration is not a fast path. 1192 */ 1193 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) 1194 return; 1195 1196 /* 1197 * First handle the simple case where no cmpxchg is necessary; just 1198 * allow posting non-urgent interrupts. 1199 * 1200 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change 1201 * PI.NDST: pi_post_block will do it for us and the wakeup_handler 1202 * expects the VCPU to be on the blocked_vcpu_list that matches 1203 * PI.NDST. 1204 */ 1205 if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || 1206 vcpu->cpu == cpu) { 1207 pi_clear_sn(pi_desc); 1208 return; 1209 } 1210 1211 /* The full case. */ 1212 do { 1213 old.control = new.control = pi_desc->control; 1214 1215 dest = cpu_physical_id(cpu); 1216 1217 if (x2apic_enabled()) 1218 new.ndst = dest; 1219 else 1220 new.ndst = (dest << 8) & 0xFF00; 1221 1222 new.sn = 0; 1223 } while (cmpxchg64(&pi_desc->control, old.control, 1224 new.control) != old.control); 1225 } 1226 1227 /* 1228 * Switches to specified vcpu, until a matching vcpu_put(), but assumes 1229 * vcpu mutex is already taken. 1230 */ 1231 void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1232 { 1233 struct vcpu_vmx *vmx = to_vmx(vcpu); 1234 bool already_loaded = vmx->loaded_vmcs->cpu == cpu; 1235 1236 if (!already_loaded) { 1237 loaded_vmcs_clear(vmx->loaded_vmcs); 1238 local_irq_disable(); 1239 crash_disable_local_vmclear(cpu); 1240 1241 /* 1242 * Read loaded_vmcs->cpu should be before fetching 1243 * loaded_vmcs->loaded_vmcss_on_cpu_link. 1244 * See the comments in __loaded_vmcs_clear(). 
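		 *
		 * The pairing, roughly:
		 *   writer (__loaded_vmcs_clear): list_del(); smp_wmb(); cpu = -1;
		 *   reader (here):                read cpu;   smp_rmb(); list_add();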
		 */
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		crash_enable_local_vmclear(cpu);
		local_irq_enable();
	}

	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);
		indirect_branch_prediction_barrier();
	}

	if (!already_loaded) {
		void *gdt = get_current_gdt_ro();
		unsigned long sysenter_esp;

		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors. See 22.2.4.
		 */
		vmcs_writel(HOST_TR_BASE,
			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */

		/*
		 * VM exits change the host TR limit to 0x67. This is okay,
		 * since 0x67 covers everything except the IO bitmap, and we
		 * have code to handle the IO bitmap being lost after a VM
		 * exit.
		 */
		BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		vmx->loaded_vmcs->cpu = cpu;
	}

	/* Setup TSC multiplier */
	if (kvm_has_tsc_control &&
	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
		decache_tsc_multiplier(vmx);

	vmx_vcpu_pi_load(vcpu, cpu);
	vmx->host_pkru = read_pkru();
	vmx->host_debugctlmsr = get_debugctlmsr();
}

static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
		!kvm_vcpu_apicv_active(vcpu))
		return;

	/* Set SN when the vCPU is preempted */
	if (vcpu->preempted)
		pi_set_sn(pi_desc);
}

void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_vcpu_pi_put(vcpu);

	vmx_prepare_switch_to_host(to_vmx(vcpu));
}

static bool emulation_required(struct kvm_vcpu *vcpu)
{
	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
}

static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);

unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags, save_rflags;

	if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
		rflags = vmcs_readl(GUEST_RFLAGS);
		if (to_vmx(vcpu)->rmode.vm86_active) {
			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
			save_rflags = to_vmx(vcpu)->rmode.save_rflags;
			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
		}
		to_vmx(vcpu)->rflags = rflags;
	}
	return to_vmx(vcpu)->rflags;
}

void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	unsigned long old_rflags = vmx_get_rflags(vcpu);

	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
	to_vmx(vcpu)->rflags = rflags;
	if (to_vmx(vcpu)->rmode.vm86_active) {
		to_vmx(vcpu)->rmode.save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	vmcs_writel(GUEST_RFLAGS, rflags);

	if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
		to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
}

u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	int ret = 0;

	if (interruptibility & GUEST_INTR_STATE_STI)
		ret |= KVM_X86_SHADOW_INT_STI;
	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
		ret |= KVM_X86_SHADOW_INT_MOV_SS;

	return ret;
}

void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	u32 interruptibility = interruptibility_old;

	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);

	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
		interruptibility |= GUEST_INTR_STATE_MOV_SS;
	else if (mask & KVM_X86_SHADOW_INT_STI)
		interruptibility |= GUEST_INTR_STATE_STI;

	if ((interruptibility != interruptibility_old))
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}

static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long value;

	/*
	 * Any MSR write that attempts to change bits marked reserved will
	 * cause a #GP fault.
	 */
	if (data & vmx->pt_desc.ctl_bitmask)
		return 1;

	/*
	 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
	 * result in a #GP unless the same write also clears TraceEn.
	 */
	if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
		((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
		return 1;

	/*
	 * WRMSR to IA32_RTIT_CTL that sets TraceEn but clears this bit
	 * and FabricEn would cause #GP, if
	 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0
	 */
	if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
		!(data & RTIT_CTL_FABRIC_EN) &&
		!intel_pt_validate_cap(vmx->pt_desc.caps,
					PT_CAP_single_range_output))
		return 1;

	/*
	 * MTCFreq, CycThresh and PSBFreq encodings check: any MSR write that
	 * uses encodings marked reserved will cause a #GP fault.
	 */
	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
			!test_bit((data & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET, &value))
		return 1;
	value = intel_pt_validate_cap(vmx->pt_desc.caps,
						PT_CAP_cycle_thresholds);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
			!test_bit((data & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET, &value))
		return 1;
	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
			!test_bit((data & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET, &value))
		return 1;

	/*
	 * If an ADDRx_CFG field is reserved, or its encoding is greater
	 * than 2, the write will cause a #GP fault.
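	 * (The ADDRn_CFG encodings are: 0 = range disabled, 1 = address
	 * filtering, 2 = TraceStop; values above 2 are reserved, and a
	 * non-zero encoding is only valid if CPUID reports that many
	 * address ranges.)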
1443 */ 1444 value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET; 1445 if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2)) 1446 return 1; 1447 value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET; 1448 if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2)) 1449 return 1; 1450 value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET; 1451 if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2)) 1452 return 1; 1453 value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET; 1454 if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2)) 1455 return 1; 1456 1457 return 0; 1458 } 1459 1460 1461 static void skip_emulated_instruction(struct kvm_vcpu *vcpu) 1462 { 1463 unsigned long rip; 1464 1465 rip = kvm_rip_read(vcpu); 1466 rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 1467 kvm_rip_write(vcpu, rip); 1468 1469 /* skipping an emulated instruction also counts */ 1470 vmx_set_interrupt_shadow(vcpu, 0); 1471 } 1472 1473 static void vmx_clear_hlt(struct kvm_vcpu *vcpu) 1474 { 1475 /* 1476 * Ensure that we clear the HLT state in the VMCS. We don't need to 1477 * explicitly skip the instruction because if the HLT state is set, 1478 * then the instruction is already executing and RIP has already been 1479 * advanced. 1480 */ 1481 if (kvm_hlt_in_guest(vcpu->kvm) && 1482 vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT) 1483 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); 1484 } 1485 1486 static void vmx_queue_exception(struct kvm_vcpu *vcpu) 1487 { 1488 struct vcpu_vmx *vmx = to_vmx(vcpu); 1489 unsigned nr = vcpu->arch.exception.nr; 1490 bool has_error_code = vcpu->arch.exception.has_error_code; 1491 u32 error_code = vcpu->arch.exception.error_code; 1492 u32 intr_info = nr | INTR_INFO_VALID_MASK; 1493 1494 kvm_deliver_exception_payload(vcpu); 1495 1496 if (has_error_code) { 1497 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 1498 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 1499 } 1500 1501 if (vmx->rmode.vm86_active) { 1502 int inc_eip = 0; 1503 if (kvm_exception_is_soft(nr)) 1504 inc_eip = vcpu->arch.event_exit_inst_len; 1505 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE) 1506 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 1507 return; 1508 } 1509 1510 WARN_ON_ONCE(vmx->emulation_required); 1511 1512 if (kvm_exception_is_soft(nr)) { 1513 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1514 vmx->vcpu.arch.event_exit_inst_len); 1515 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 1516 } else 1517 intr_info |= INTR_TYPE_HARD_EXCEPTION; 1518 1519 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); 1520 1521 vmx_clear_hlt(vcpu); 1522 } 1523 1524 static bool vmx_rdtscp_supported(void) 1525 { 1526 return cpu_has_vmx_rdtscp(); 1527 } 1528 1529 static bool vmx_invpcid_supported(void) 1530 { 1531 return cpu_has_vmx_invpcid(); 1532 } 1533 1534 /* 1535 * Swap MSR entry in host/guest MSR entry array. 1536 */ 1537 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) 1538 { 1539 struct shared_msr_entry tmp; 1540 1541 tmp = vmx->guest_msrs[to]; 1542 vmx->guest_msrs[to] = vmx->guest_msrs[from]; 1543 vmx->guest_msrs[from] = tmp; 1544 } 1545 1546 /* 1547 * Set up the vmcs to automatically save and restore system 1548 * msrs. Don't touch the 64-bit msrs if the guest is in legacy 1549 * mode, as fiddling with msrs is very expensive. 
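 *
 * The MSRs chosen below are compacted to the front of guest_msrs[] via
 * move_msr_up(); save_nmsrs is the count of entries that will actually
 * be switched through the shared-MSR mechanism.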
1550 */ 1551 static void setup_msrs(struct vcpu_vmx *vmx) 1552 { 1553 int save_nmsrs, index; 1554 1555 save_nmsrs = 0; 1556 #ifdef CONFIG_X86_64 1557 /* 1558 * The SYSCALL MSRs are only needed on long mode guests, and only 1559 * when EFER.SCE is set. 1560 */ 1561 if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) { 1562 index = __find_msr_index(vmx, MSR_STAR); 1563 if (index >= 0) 1564 move_msr_up(vmx, index, save_nmsrs++); 1565 index = __find_msr_index(vmx, MSR_LSTAR); 1566 if (index >= 0) 1567 move_msr_up(vmx, index, save_nmsrs++); 1568 index = __find_msr_index(vmx, MSR_SYSCALL_MASK); 1569 if (index >= 0) 1570 move_msr_up(vmx, index, save_nmsrs++); 1571 } 1572 #endif 1573 index = __find_msr_index(vmx, MSR_EFER); 1574 if (index >= 0 && update_transition_efer(vmx, index)) 1575 move_msr_up(vmx, index, save_nmsrs++); 1576 index = __find_msr_index(vmx, MSR_TSC_AUX); 1577 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) 1578 move_msr_up(vmx, index, save_nmsrs++); 1579 1580 vmx->save_nmsrs = save_nmsrs; 1581 vmx->guest_msrs_dirty = true; 1582 1583 if (cpu_has_vmx_msr_bitmap()) 1584 vmx_update_msr_bitmap(&vmx->vcpu); 1585 } 1586 1587 static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) 1588 { 1589 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1590 1591 if (is_guest_mode(vcpu) && 1592 (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) 1593 return vcpu->arch.tsc_offset - vmcs12->tsc_offset; 1594 1595 return vcpu->arch.tsc_offset; 1596 } 1597 1598 static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 1599 { 1600 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1601 u64 g_tsc_offset = 0; 1602 1603 /* 1604 * We're here if L1 chose not to trap WRMSR to TSC. According 1605 * to the spec, this should set L1's TSC; The offset that L1 1606 * set for L2 remains unchanged, and still needs to be added 1607 * to the newly set TSC to get L2's TSC. 1608 */ 1609 if (is_guest_mode(vcpu) && 1610 (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) 1611 g_tsc_offset = vmcs12->tsc_offset; 1612 1613 trace_kvm_write_tsc_offset(vcpu->vcpu_id, 1614 vcpu->arch.tsc_offset - g_tsc_offset, 1615 offset); 1616 vmcs_write64(TSC_OFFSET, offset + g_tsc_offset); 1617 return offset + g_tsc_offset; 1618 } 1619 1620 /* 1621 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX 1622 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for 1623 * all guests if the "nested" module option is off, and can also be disabled 1624 * for a single guest by disabling its VMX cpuid bit. 1625 */ 1626 bool nested_vmx_allowed(struct kvm_vcpu *vcpu) 1627 { 1628 return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); 1629 } 1630 1631 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, 1632 uint64_t val) 1633 { 1634 uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; 1635 1636 return !(val & ~valid_bits); 1637 } 1638 1639 static int vmx_get_msr_feature(struct kvm_msr_entry *msr) 1640 { 1641 switch (msr->index) { 1642 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 1643 if (!nested) 1644 return 1; 1645 return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); 1646 default: 1647 return 1; 1648 } 1649 1650 return 0; 1651 } 1652 1653 /* 1654 * Reads an msr value (of 'msr_index') into 'pdata'. 1655 * Returns 0 on success, non-0 otherwise. 1656 * Assumes vcpu_load() was already called. 
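 *
 * MSRs that are not handled here fall through to kvm_get_msr_common().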
1657 */ 1658 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1659 { 1660 struct vcpu_vmx *vmx = to_vmx(vcpu); 1661 struct shared_msr_entry *msr; 1662 u32 index; 1663 1664 switch (msr_info->index) { 1665 #ifdef CONFIG_X86_64 1666 case MSR_FS_BASE: 1667 msr_info->data = vmcs_readl(GUEST_FS_BASE); 1668 break; 1669 case MSR_GS_BASE: 1670 msr_info->data = vmcs_readl(GUEST_GS_BASE); 1671 break; 1672 case MSR_KERNEL_GS_BASE: 1673 msr_info->data = vmx_read_guest_kernel_gs_base(vmx); 1674 break; 1675 #endif 1676 case MSR_EFER: 1677 return kvm_get_msr_common(vcpu, msr_info); 1678 case MSR_IA32_SPEC_CTRL: 1679 if (!msr_info->host_initiated && 1680 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) 1681 return 1; 1682 1683 msr_info->data = to_vmx(vcpu)->spec_ctrl; 1684 break; 1685 case MSR_IA32_ARCH_CAPABILITIES: 1686 if (!msr_info->host_initiated && 1687 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) 1688 return 1; 1689 msr_info->data = to_vmx(vcpu)->arch_capabilities; 1690 break; 1691 case MSR_IA32_SYSENTER_CS: 1692 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); 1693 break; 1694 case MSR_IA32_SYSENTER_EIP: 1695 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP); 1696 break; 1697 case MSR_IA32_SYSENTER_ESP: 1698 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); 1699 break; 1700 case MSR_IA32_BNDCFGS: 1701 if (!kvm_mpx_supported() || 1702 (!msr_info->host_initiated && 1703 !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) 1704 return 1; 1705 msr_info->data = vmcs_read64(GUEST_BNDCFGS); 1706 break; 1707 case MSR_IA32_MCG_EXT_CTL: 1708 if (!msr_info->host_initiated && 1709 !(vmx->msr_ia32_feature_control & 1710 FEATURE_CONTROL_LMCE)) 1711 return 1; 1712 msr_info->data = vcpu->arch.mcg_ext_ctl; 1713 break; 1714 case MSR_IA32_FEATURE_CONTROL: 1715 msr_info->data = vmx->msr_ia32_feature_control; 1716 break; 1717 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 1718 if (!nested_vmx_allowed(vcpu)) 1719 return 1; 1720 return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, 1721 &msr_info->data); 1722 case MSR_IA32_XSS: 1723 if (!vmx_xsaves_supported()) 1724 return 1; 1725 msr_info->data = vcpu->arch.ia32_xss; 1726 break; 1727 case MSR_IA32_RTIT_CTL: 1728 if (pt_mode != PT_MODE_HOST_GUEST) 1729 return 1; 1730 msr_info->data = vmx->pt_desc.guest.ctl; 1731 break; 1732 case MSR_IA32_RTIT_STATUS: 1733 if (pt_mode != PT_MODE_HOST_GUEST) 1734 return 1; 1735 msr_info->data = vmx->pt_desc.guest.status; 1736 break; 1737 case MSR_IA32_RTIT_CR3_MATCH: 1738 if ((pt_mode != PT_MODE_HOST_GUEST) || 1739 !intel_pt_validate_cap(vmx->pt_desc.caps, 1740 PT_CAP_cr3_filtering)) 1741 return 1; 1742 msr_info->data = vmx->pt_desc.guest.cr3_match; 1743 break; 1744 case MSR_IA32_RTIT_OUTPUT_BASE: 1745 if ((pt_mode != PT_MODE_HOST_GUEST) || 1746 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1747 PT_CAP_topa_output) && 1748 !intel_pt_validate_cap(vmx->pt_desc.caps, 1749 PT_CAP_single_range_output))) 1750 return 1; 1751 msr_info->data = vmx->pt_desc.guest.output_base; 1752 break; 1753 case MSR_IA32_RTIT_OUTPUT_MASK: 1754 if ((pt_mode != PT_MODE_HOST_GUEST) || 1755 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1756 PT_CAP_topa_output) && 1757 !intel_pt_validate_cap(vmx->pt_desc.caps, 1758 PT_CAP_single_range_output))) 1759 return 1; 1760 msr_info->data = vmx->pt_desc.guest.output_mask; 1761 break; 1762 case MSR_IA32_RTIT_ADDR0_A ... 
MSR_IA32_RTIT_ADDR3_B:
		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
		if ((pt_mode != PT_MODE_HOST_GUEST) ||
			(index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
					PT_CAP_num_address_ranges)))
			return 1;
		if (index % 2)
			msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
		else
			msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
		break;
	case MSR_TSC_AUX:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
			return 1;
		/* Else, falls through */
	default:
		msr = find_msr_entry(vmx, msr_info->index);
		if (msr) {
			msr_info->data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_info);
	}

	return 0;
}

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr;
	int ret = 0;
	u32 msr_index = msr_info->index;
	u64 data = msr_info->data;
	u32 index;

	switch (msr_index) {
	case MSR_EFER:
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_GS_BASE, data);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_write_guest_kernel_gs_base(vmx, data);
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_BNDCFGS:
		if (!kvm_mpx_supported() ||
		    (!msr_info->host_initiated &&
		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
			return 1;
		if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
		    (data & MSR_IA32_BNDCFGS_RSVD))
			return 1;
		vmcs_write64(GUEST_BNDCFGS, data);
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
			return 1;

		/* The STIBP bit doesn't fault even if it's not advertised */
		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
			return 1;

		vmx->spec_ctrl = data;

		if (!data)
			break;

		/*
		 * For non-nested:
		 * When it's written (to non-zero) for the first time, pass
		 * it through.
		 *
		 * For nested:
		 * The handling of the MSR bitmap for L2 guests is done in
		 * nested_vmx_merge_msr_bitmap. We should not touch the
		 * vmcs02.msr_bitmap here since it gets completely overwritten
		 * in the merging. We update the vmcs01 here for L1 as well,
		 * since it will end up touching the MSR anyway now.
1865 */ 1866 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, 1867 MSR_IA32_SPEC_CTRL, 1868 MSR_TYPE_RW); 1869 break; 1870 case MSR_IA32_PRED_CMD: 1871 if (!msr_info->host_initiated && 1872 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) 1873 return 1; 1874 1875 if (data & ~PRED_CMD_IBPB) 1876 return 1; 1877 1878 if (!data) 1879 break; 1880 1881 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); 1882 1883 /* 1884 * For non-nested: 1885 * When it's written (to non-zero) for the first time, pass 1886 * it through. 1887 * 1888 * For nested: 1889 * The handling of the MSR bitmap for L2 guests is done in 1890 * nested_vmx_merge_msr_bitmap. We should not touch the 1891 * vmcs02.msr_bitmap here since it gets completely overwritten 1892 * in the merging. 1893 */ 1894 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, 1895 MSR_TYPE_W); 1896 break; 1897 case MSR_IA32_ARCH_CAPABILITIES: 1898 if (!msr_info->host_initiated) 1899 return 1; 1900 vmx->arch_capabilities = data; 1901 break; 1902 case MSR_IA32_CR_PAT: 1903 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 1904 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) 1905 return 1; 1906 vmcs_write64(GUEST_IA32_PAT, data); 1907 vcpu->arch.pat = data; 1908 break; 1909 } 1910 ret = kvm_set_msr_common(vcpu, msr_info); 1911 break; 1912 case MSR_IA32_TSC_ADJUST: 1913 ret = kvm_set_msr_common(vcpu, msr_info); 1914 break; 1915 case MSR_IA32_MCG_EXT_CTL: 1916 if ((!msr_info->host_initiated && 1917 !(to_vmx(vcpu)->msr_ia32_feature_control & 1918 FEATURE_CONTROL_LMCE)) || 1919 (data & ~MCG_EXT_CTL_LMCE_EN)) 1920 return 1; 1921 vcpu->arch.mcg_ext_ctl = data; 1922 break; 1923 case MSR_IA32_FEATURE_CONTROL: 1924 if (!vmx_feature_control_msr_valid(vcpu, data) || 1925 (to_vmx(vcpu)->msr_ia32_feature_control & 1926 FEATURE_CONTROL_LOCKED && !msr_info->host_initiated)) 1927 return 1; 1928 vmx->msr_ia32_feature_control = data; 1929 if (msr_info->host_initiated && data == 0) 1930 vmx_leave_nested(vcpu); 1931 break; 1932 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: 1933 if (!msr_info->host_initiated) 1934 return 1; /* they are read-only */ 1935 if (!nested_vmx_allowed(vcpu)) 1936 return 1; 1937 return vmx_set_vmx_msr(vcpu, msr_index, data); 1938 case MSR_IA32_XSS: 1939 if (!vmx_xsaves_supported()) 1940 return 1; 1941 /* 1942 * The only supported bit as of Skylake is bit 8, but 1943 * it is not supported on KVM. 
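 * (IA32_XSS bit 8 covers XSAVES handling of Intel PT state; KVM manages the
 * PT MSRs separately, so only a value of 0 is accepted here.)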
1944 */ 1945 if (data != 0) 1946 return 1; 1947 vcpu->arch.ia32_xss = data; 1948 if (vcpu->arch.ia32_xss != host_xss) 1949 add_atomic_switch_msr(vmx, MSR_IA32_XSS, 1950 vcpu->arch.ia32_xss, host_xss, false); 1951 else 1952 clear_atomic_switch_msr(vmx, MSR_IA32_XSS); 1953 break; 1954 case MSR_IA32_RTIT_CTL: 1955 if ((pt_mode != PT_MODE_HOST_GUEST) || 1956 vmx_rtit_ctl_check(vcpu, data) || 1957 vmx->nested.vmxon) 1958 return 1; 1959 vmcs_write64(GUEST_IA32_RTIT_CTL, data); 1960 vmx->pt_desc.guest.ctl = data; 1961 pt_update_intercept_for_msr(vmx); 1962 break; 1963 case MSR_IA32_RTIT_STATUS: 1964 if ((pt_mode != PT_MODE_HOST_GUEST) || 1965 (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || 1966 (data & MSR_IA32_RTIT_STATUS_MASK)) 1967 return 1; 1968 vmx->pt_desc.guest.status = data; 1969 break; 1970 case MSR_IA32_RTIT_CR3_MATCH: 1971 if ((pt_mode != PT_MODE_HOST_GUEST) || 1972 (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || 1973 !intel_pt_validate_cap(vmx->pt_desc.caps, 1974 PT_CAP_cr3_filtering)) 1975 return 1; 1976 vmx->pt_desc.guest.cr3_match = data; 1977 break; 1978 case MSR_IA32_RTIT_OUTPUT_BASE: 1979 if ((pt_mode != PT_MODE_HOST_GUEST) || 1980 (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || 1981 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1982 PT_CAP_topa_output) && 1983 !intel_pt_validate_cap(vmx->pt_desc.caps, 1984 PT_CAP_single_range_output)) || 1985 (data & MSR_IA32_RTIT_OUTPUT_BASE_MASK)) 1986 return 1; 1987 vmx->pt_desc.guest.output_base = data; 1988 break; 1989 case MSR_IA32_RTIT_OUTPUT_MASK: 1990 if ((pt_mode != PT_MODE_HOST_GUEST) || 1991 (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || 1992 (!intel_pt_validate_cap(vmx->pt_desc.caps, 1993 PT_CAP_topa_output) && 1994 !intel_pt_validate_cap(vmx->pt_desc.caps, 1995 PT_CAP_single_range_output))) 1996 return 1; 1997 vmx->pt_desc.guest.output_mask = data; 1998 break; 1999 case MSR_IA32_RTIT_ADDR0_A ... 
MSR_IA32_RTIT_ADDR3_B: 2000 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; 2001 if ((pt_mode != PT_MODE_HOST_GUEST) || 2002 (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) || 2003 (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, 2004 PT_CAP_num_address_ranges))) 2005 return 1; 2006 if (index % 2) 2007 vmx->pt_desc.guest.addr_b[index / 2] = data; 2008 else 2009 vmx->pt_desc.guest.addr_a[index / 2] = data; 2010 break; 2011 case MSR_TSC_AUX: 2012 if (!msr_info->host_initiated && 2013 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) 2014 return 1; 2015 /* Check reserved bit, higher 32 bits should be zero */ 2016 if ((data >> 32) != 0) 2017 return 1; 2018 /* Else, falls through */ 2019 default: 2020 msr = find_msr_entry(vmx, msr_index); 2021 if (msr) { 2022 u64 old_msr_data = msr->data; 2023 msr->data = data; 2024 if (msr - vmx->guest_msrs < vmx->save_nmsrs) { 2025 preempt_disable(); 2026 ret = kvm_set_shared_msr(msr->index, msr->data, 2027 msr->mask); 2028 preempt_enable(); 2029 if (ret) 2030 msr->data = old_msr_data; 2031 } 2032 break; 2033 } 2034 ret = kvm_set_msr_common(vcpu, msr_info); 2035 } 2036 2037 return ret; 2038 } 2039 2040 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) 2041 { 2042 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); 2043 switch (reg) { 2044 case VCPU_REGS_RSP: 2045 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); 2046 break; 2047 case VCPU_REGS_RIP: 2048 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); 2049 break; 2050 case VCPU_EXREG_PDPTR: 2051 if (enable_ept) 2052 ept_save_pdptrs(vcpu); 2053 break; 2054 default: 2055 break; 2056 } 2057 } 2058 2059 static __init int cpu_has_kvm_support(void) 2060 { 2061 return cpu_has_vmx(); 2062 } 2063 2064 static __init int vmx_disabled_by_bios(void) 2065 { 2066 u64 msr; 2067 2068 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); 2069 if (msr & FEATURE_CONTROL_LOCKED) { 2070 /* launched w/ TXT and VMX disabled */ 2071 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) 2072 && tboot_enabled()) 2073 return 1; 2074 /* launched w/o TXT and VMX only enabled w/ TXT */ 2075 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) 2076 && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) 2077 && !tboot_enabled()) { 2078 printk(KERN_WARNING "kvm: disable TXT in the BIOS or " 2079 "activate TXT before enabling KVM\n"); 2080 return 1; 2081 } 2082 /* launched w/o TXT and VMX disabled */ 2083 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) 2084 && !tboot_enabled()) 2085 return 1; 2086 } 2087 2088 return 0; 2089 } 2090 2091 static void kvm_cpu_vmxon(u64 addr) 2092 { 2093 cr4_set_bits(X86_CR4_VMXE); 2094 intel_pt_handle_vmx(1); 2095 2096 asm volatile ("vmxon %0" : : "m"(addr)); 2097 } 2098 2099 static int hardware_enable(void) 2100 { 2101 int cpu = raw_smp_processor_id(); 2102 u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); 2103 u64 old, test_bits; 2104 2105 if (cr4_read_shadow() & X86_CR4_VMXE) 2106 return -EBUSY; 2107 2108 /* 2109 * This can happen if we hot-added a CPU but failed to allocate 2110 * VP assist page for it. 2111 */ 2112 if (static_branch_unlikely(&enable_evmcs) && 2113 !hv_get_vp_assist_page(cpu)) 2114 return -EFAULT; 2115 2116 INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); 2117 INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); 2118 spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); 2119 2120 /* 2121 * Now we can enable the vmclear operation in kdump 2122 * since the loaded_vmcss_on_cpu list on this cpu 2123 * has been initialized. 
2124 * 2125 * Even though the cpu is not in VMX operation yet, it is safe 2126 * to enable the vmclear operation here, because the 2127 * loaded_vmcss_on_cpu list is empty. 2128 */ 2129 crash_enable_local_vmclear(cpu); 2130 2131 rdmsrl(MSR_IA32_FEATURE_CONTROL, old); 2132 2133 test_bits = FEATURE_CONTROL_LOCKED; 2134 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; 2135 if (tboot_enabled()) 2136 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; 2137 2138 if ((old & test_bits) != test_bits) { 2139 /* enable and lock */ 2140 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); 2141 } 2142 kvm_cpu_vmxon(phys_addr); 2143 if (enable_ept) 2144 ept_sync_global(); 2145 2146 return 0; 2147 } 2148 2149 static void vmclear_local_loaded_vmcss(void) 2150 { 2151 int cpu = raw_smp_processor_id(); 2152 struct loaded_vmcs *v, *n; 2153 2154 list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), 2155 loaded_vmcss_on_cpu_link) 2156 __loaded_vmcs_clear(v); 2157 } 2158 2159 2160 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() 2161 * tricks. 2162 */ 2163 static void kvm_cpu_vmxoff(void) 2164 { 2165 asm volatile (__ex("vmxoff")); 2166 2167 intel_pt_handle_vmx(0); 2168 cr4_clear_bits(X86_CR4_VMXE); 2169 } 2170 2171 static void hardware_disable(void) 2172 { 2173 vmclear_local_loaded_vmcss(); 2174 kvm_cpu_vmxoff(); 2175 } 2176 2177 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, 2178 u32 msr, u32 *result) 2179 { 2180 u32 vmx_msr_low, vmx_msr_high; 2181 u32 ctl = ctl_min | ctl_opt; 2182 2183 rdmsr(msr, vmx_msr_low, vmx_msr_high); 2184 2185 ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ 2186 ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ 2187 2188 /* Ensure the minimum (required) set of control bits is supported.
*/ 2189 if (ctl_min & ~ctl) 2190 return -EIO; 2191 2192 *result = ctl; 2193 return 0; 2194 } 2195 2196 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, 2197 struct vmx_capability *vmx_cap) 2198 { 2199 u32 vmx_msr_low, vmx_msr_high; 2200 u32 min, opt, min2, opt2; 2201 u32 _pin_based_exec_control = 0; 2202 u32 _cpu_based_exec_control = 0; 2203 u32 _cpu_based_2nd_exec_control = 0; 2204 u32 _vmexit_control = 0; 2205 u32 _vmentry_control = 0; 2206 2207 memset(vmcs_conf, 0, sizeof(*vmcs_conf)); 2208 min = CPU_BASED_HLT_EXITING | 2209 #ifdef CONFIG_X86_64 2210 CPU_BASED_CR8_LOAD_EXITING | 2211 CPU_BASED_CR8_STORE_EXITING | 2212 #endif 2213 CPU_BASED_CR3_LOAD_EXITING | 2214 CPU_BASED_CR3_STORE_EXITING | 2215 CPU_BASED_UNCOND_IO_EXITING | 2216 CPU_BASED_MOV_DR_EXITING | 2217 CPU_BASED_USE_TSC_OFFSETING | 2218 CPU_BASED_MWAIT_EXITING | 2219 CPU_BASED_MONITOR_EXITING | 2220 CPU_BASED_INVLPG_EXITING | 2221 CPU_BASED_RDPMC_EXITING; 2222 2223 opt = CPU_BASED_TPR_SHADOW | 2224 CPU_BASED_USE_MSR_BITMAPS | 2225 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 2226 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, 2227 &_cpu_based_exec_control) < 0) 2228 return -EIO; 2229 #ifdef CONFIG_X86_64 2230 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) 2231 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & 2232 ~CPU_BASED_CR8_STORE_EXITING; 2233 #endif 2234 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { 2235 min2 = 0; 2236 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2237 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2238 SECONDARY_EXEC_WBINVD_EXITING | 2239 SECONDARY_EXEC_ENABLE_VPID | 2240 SECONDARY_EXEC_ENABLE_EPT | 2241 SECONDARY_EXEC_UNRESTRICTED_GUEST | 2242 SECONDARY_EXEC_PAUSE_LOOP_EXITING | 2243 SECONDARY_EXEC_DESC | 2244 SECONDARY_EXEC_RDTSCP | 2245 SECONDARY_EXEC_ENABLE_INVPCID | 2246 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2247 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2248 SECONDARY_EXEC_SHADOW_VMCS | 2249 SECONDARY_EXEC_XSAVES | 2250 SECONDARY_EXEC_RDSEED_EXITING | 2251 SECONDARY_EXEC_RDRAND_EXITING | 2252 SECONDARY_EXEC_ENABLE_PML | 2253 SECONDARY_EXEC_TSC_SCALING | 2254 SECONDARY_EXEC_PT_USE_GPA | 2255 SECONDARY_EXEC_PT_CONCEAL_VMX | 2256 SECONDARY_EXEC_ENABLE_VMFUNC | 2257 SECONDARY_EXEC_ENCLS_EXITING; 2258 if (adjust_vmx_controls(min2, opt2, 2259 MSR_IA32_VMX_PROCBASED_CTLS2, 2260 &_cpu_based_2nd_exec_control) < 0) 2261 return -EIO; 2262 } 2263 #ifndef CONFIG_X86_64 2264 if (!(_cpu_based_2nd_exec_control & 2265 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 2266 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; 2267 #endif 2268 2269 if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) 2270 _cpu_based_2nd_exec_control &= ~( 2271 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2272 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2273 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 2274 2275 rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, 2276 &vmx_cap->ept, &vmx_cap->vpid); 2277 2278 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { 2279 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT 2280 enabled */ 2281 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | 2282 CPU_BASED_CR3_STORE_EXITING | 2283 CPU_BASED_INVLPG_EXITING); 2284 } else if (vmx_cap->ept) { 2285 vmx_cap->ept = 0; 2286 pr_warn_once("EPT CAP should not exist if not support " 2287 "1-setting enable EPT VM-execution control\n"); 2288 } 2289 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) && 2290 vmx_cap->vpid) { 2291 vmx_cap->vpid = 0; 2292 pr_warn_once("VPID CAP should not exist if 
not support " 2293 "1-setting enable VPID VM-execution control\n"); 2294 } 2295 2296 min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT; 2297 #ifdef CONFIG_X86_64 2298 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; 2299 #endif 2300 opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2301 VM_EXIT_SAVE_IA32_PAT | 2302 VM_EXIT_LOAD_IA32_PAT | 2303 VM_EXIT_LOAD_IA32_EFER | 2304 VM_EXIT_CLEAR_BNDCFGS | 2305 VM_EXIT_PT_CONCEAL_PIP | 2306 VM_EXIT_CLEAR_IA32_RTIT_CTL; 2307 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, 2308 &_vmexit_control) < 0) 2309 return -EIO; 2310 2311 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; 2312 opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR | 2313 PIN_BASED_VMX_PREEMPTION_TIMER; 2314 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, 2315 &_pin_based_exec_control) < 0) 2316 return -EIO; 2317 2318 if (cpu_has_broken_vmx_preemption_timer()) 2319 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 2320 if (!(_cpu_based_2nd_exec_control & 2321 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)) 2322 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; 2323 2324 min = VM_ENTRY_LOAD_DEBUG_CONTROLS; 2325 opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | 2326 VM_ENTRY_LOAD_IA32_PAT | 2327 VM_ENTRY_LOAD_IA32_EFER | 2328 VM_ENTRY_LOAD_BNDCFGS | 2329 VM_ENTRY_PT_CONCEAL_PIP | 2330 VM_ENTRY_LOAD_IA32_RTIT_CTL; 2331 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, 2332 &_vmentry_control) < 0) 2333 return -EIO; 2334 2335 /* 2336 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they 2337 * can't be used due to an errata where VM Exit may incorrectly clear 2338 * IA32_PERF_GLOBAL_CTRL[34:32]. Workaround the errata by using the 2339 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL. 2340 */ 2341 if (boot_cpu_data.x86 == 0x6) { 2342 switch (boot_cpu_data.x86_model) { 2343 case 26: /* AAK155 */ 2344 case 30: /* AAP115 */ 2345 case 37: /* AAT100 */ 2346 case 44: /* BC86,AAY89,BD102 */ 2347 case 46: /* BA97 */ 2348 _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 2349 _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 2350 pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " 2351 "does not work properly. Using workaround\n"); 2352 break; 2353 default: 2354 break; 2355 } 2356 } 2357 2358 2359 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); 2360 2361 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ 2362 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) 2363 return -EIO; 2364 2365 #ifdef CONFIG_X86_64 2366 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ 2367 if (vmx_msr_high & (1u<<16)) 2368 return -EIO; 2369 #endif 2370 2371 /* Require Write-Back (WB) memory type for VMCS accesses. 
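 * (IA32_VMX_BASIC bits 53:50, i.e. bits 21:18 of the high dword read above,
 * report the VMCS memory type; a value of 6 means write-back.)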
*/ 2372 if (((vmx_msr_high >> 18) & 15) != 6) 2373 return -EIO; 2374 2375 vmcs_conf->size = vmx_msr_high & 0x1fff; 2376 vmcs_conf->order = get_order(vmcs_conf->size); 2377 vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; 2378 2379 vmcs_conf->revision_id = vmx_msr_low; 2380 2381 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; 2382 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; 2383 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; 2384 vmcs_conf->vmexit_ctrl = _vmexit_control; 2385 vmcs_conf->vmentry_ctrl = _vmentry_control; 2386 2387 if (static_branch_unlikely(&enable_evmcs)) 2388 evmcs_sanitize_exec_ctrls(vmcs_conf); 2389 2390 return 0; 2391 } 2392 2393 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu) 2394 { 2395 int node = cpu_to_node(cpu); 2396 struct page *pages; 2397 struct vmcs *vmcs; 2398 2399 pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); 2400 if (!pages) 2401 return NULL; 2402 vmcs = page_address(pages); 2403 memset(vmcs, 0, vmcs_config.size); 2404 2405 /* KVM supports Enlightened VMCS v1 only */ 2406 if (static_branch_unlikely(&enable_evmcs)) 2407 vmcs->hdr.revision_id = KVM_EVMCS_VERSION; 2408 else 2409 vmcs->hdr.revision_id = vmcs_config.revision_id; 2410 2411 if (shadow) 2412 vmcs->hdr.shadow_vmcs = 1; 2413 return vmcs; 2414 } 2415 2416 void free_vmcs(struct vmcs *vmcs) 2417 { 2418 free_pages((unsigned long)vmcs, vmcs_config.order); 2419 } 2420 2421 /* 2422 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded 2423 */ 2424 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) 2425 { 2426 if (!loaded_vmcs->vmcs) 2427 return; 2428 loaded_vmcs_clear(loaded_vmcs); 2429 free_vmcs(loaded_vmcs->vmcs); 2430 loaded_vmcs->vmcs = NULL; 2431 if (loaded_vmcs->msr_bitmap) 2432 free_page((unsigned long)loaded_vmcs->msr_bitmap); 2433 WARN_ON(loaded_vmcs->shadow_vmcs != NULL); 2434 } 2435 2436 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) 2437 { 2438 loaded_vmcs->vmcs = alloc_vmcs(false); 2439 if (!loaded_vmcs->vmcs) 2440 return -ENOMEM; 2441 2442 loaded_vmcs->shadow_vmcs = NULL; 2443 loaded_vmcs_init(loaded_vmcs); 2444 2445 if (cpu_has_vmx_msr_bitmap()) { 2446 loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); 2447 if (!loaded_vmcs->msr_bitmap) 2448 goto out_vmcs; 2449 memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); 2450 2451 if (IS_ENABLED(CONFIG_HYPERV) && 2452 static_branch_unlikely(&enable_evmcs) && 2453 (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { 2454 struct hv_enlightened_vmcs *evmcs = 2455 (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; 2456 2457 evmcs->hv_enlightenments_control.msr_bitmap = 1; 2458 } 2459 } 2460 2461 memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); 2462 2463 return 0; 2464 2465 out_vmcs: 2466 free_loaded_vmcs(loaded_vmcs); 2467 return -ENOMEM; 2468 } 2469 2470 static void free_kvm_area(void) 2471 { 2472 int cpu; 2473 2474 for_each_possible_cpu(cpu) { 2475 free_vmcs(per_cpu(vmxarea, cpu)); 2476 per_cpu(vmxarea, cpu) = NULL; 2477 } 2478 } 2479 2480 static __init int alloc_kvm_area(void) 2481 { 2482 int cpu; 2483 2484 for_each_possible_cpu(cpu) { 2485 struct vmcs *vmcs; 2486 2487 vmcs = alloc_vmcs_cpu(false, cpu); 2488 if (!vmcs) { 2489 free_kvm_area(); 2490 return -ENOMEM; 2491 } 2492 2493 /* 2494 * When eVMCS is enabled, alloc_vmcs_cpu() sets 2495 * vmcs->revision_id to KVM_EVMCS_VERSION instead of 2496 * revision_id reported by MSR_IA32_VMX_BASIC. 
2497 * 2498 * However, even though not explicitly documented by 2499 * TLFS, VMXArea passed as VMXON argument should 2500 * still be marked with revision_id reported by 2501 * physical CPU. 2502 */ 2503 if (static_branch_unlikely(&enable_evmcs)) 2504 vmcs->hdr.revision_id = vmcs_config.revision_id; 2505 2506 per_cpu(vmxarea, cpu) = vmcs; 2507 } 2508 return 0; 2509 } 2510 2511 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, 2512 struct kvm_segment *save) 2513 { 2514 if (!emulate_invalid_guest_state) { 2515 /* 2516 * CS and SS RPL should be equal during guest entry according 2517 * to VMX spec, but in reality it is not always so. Since vcpu 2518 * is in the middle of the transition from real mode to 2519 * protected mode it is safe to assume that RPL 0 is a good 2520 * default value. 2521 */ 2522 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) 2523 save->selector &= ~SEGMENT_RPL_MASK; 2524 save->dpl = save->selector & SEGMENT_RPL_MASK; 2525 save->s = 1; 2526 } 2527 vmx_set_segment(vcpu, save, seg); 2528 } 2529 2530 static void enter_pmode(struct kvm_vcpu *vcpu) 2531 { 2532 unsigned long flags; 2533 struct vcpu_vmx *vmx = to_vmx(vcpu); 2534 2535 /* 2536 * Update real mode segment cache. It may not be up-to-date if a segment 2537 * register was written while the vcpu was in guest mode. 2538 */ 2539 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 2540 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 2541 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 2542 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 2543 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 2544 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 2545 2546 vmx->rmode.vm86_active = 0; 2547 2548 vmx_segment_cache_clear(vmx); 2549 2550 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 2551 2552 flags = vmcs_readl(GUEST_RFLAGS); 2553 flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; 2554 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; 2555 vmcs_writel(GUEST_RFLAGS, flags); 2556 2557 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | 2558 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); 2559 2560 update_exception_bitmap(vcpu); 2561 2562 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 2563 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 2564 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 2565 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 2566 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 2567 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 2568 } 2569 2570 static void fix_rmode_seg(int seg, struct kvm_segment *save) 2571 { 2572 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 2573 struct kvm_segment var = *save; 2574 2575 var.dpl = 0x3; 2576 if (seg == VCPU_SREG_CS) 2577 var.type = 0x3; 2578 2579 if (!emulate_invalid_guest_state) { 2580 var.selector = var.base >> 4; 2581 var.base = var.base & 0xffff0; 2582 var.limit = 0xffff; 2583 var.g = 0; 2584 var.db = 0; 2585 var.present = 1; 2586 var.s = 1; 2587 var.l = 0; 2588 var.unusable = 0; 2589 var.type = 0x3; 2590 var.avl = 0; 2591 if (save->base & 0xf) 2592 printk_once(KERN_WARNING "kvm: segment base is not " 2593 "paragraph aligned when entering " 2594 "protected mode (seg=%d)", seg); 2595 } 2596 2597 vmcs_write16(sf->selector, var.selector); 2598 vmcs_writel(sf->base, var.base); 2599
vmcs_write32(sf->limit, var.limit); 2600 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); 2601 } 2602 2603 static void enter_rmode(struct kvm_vcpu *vcpu) 2604 { 2605 unsigned long flags; 2606 struct vcpu_vmx *vmx = to_vmx(vcpu); 2607 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); 2608 2609 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); 2610 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); 2611 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); 2612 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); 2613 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); 2614 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); 2615 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); 2616 2617 vmx->rmode.vm86_active = 1; 2618 2619 /* 2620 * Very old userspace does not call KVM_SET_TSS_ADDR before entering 2621 * vcpu. Warn the user that an update is overdue. 2622 */ 2623 if (!kvm_vmx->tss_addr) 2624 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be " 2625 "called before entering vcpu\n"); 2626 2627 vmx_segment_cache_clear(vmx); 2628 2629 vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr); 2630 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); 2631 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 2632 2633 flags = vmcs_readl(GUEST_RFLAGS); 2634 vmx->rmode.save_rflags = flags; 2635 2636 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 2637 2638 vmcs_writel(GUEST_RFLAGS, flags); 2639 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); 2640 update_exception_bitmap(vcpu); 2641 2642 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); 2643 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); 2644 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); 2645 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); 2646 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); 2647 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); 2648 2649 kvm_mmu_reset_context(vcpu); 2650 } 2651 2652 void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) 2653 { 2654 struct vcpu_vmx *vmx = to_vmx(vcpu); 2655 struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); 2656 2657 if (!msr) 2658 return; 2659 2660 vcpu->arch.efer = efer; 2661 if (efer & EFER_LMA) { 2662 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2663 msr->data = efer; 2664 } else { 2665 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2666 2667 msr->data = efer & ~EFER_LME; 2668 } 2669 setup_msrs(vmx); 2670 } 2671 2672 #ifdef CONFIG_X86_64 2673 2674 static void enter_lmode(struct kvm_vcpu *vcpu) 2675 { 2676 u32 guest_tr_ar; 2677 2678 vmx_segment_cache_clear(to_vmx(vcpu)); 2679 2680 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); 2681 if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { 2682 pr_debug_ratelimited("%s: tss fixup for long mode. 
\n", 2683 __func__); 2684 vmcs_write32(GUEST_TR_AR_BYTES, 2685 (guest_tr_ar & ~VMX_AR_TYPE_MASK) 2686 | VMX_AR_TYPE_BUSY_64_TSS); 2687 } 2688 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); 2689 } 2690 2691 static void exit_lmode(struct kvm_vcpu *vcpu) 2692 { 2693 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 2694 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); 2695 } 2696 2697 #endif 2698 2699 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr) 2700 { 2701 int vpid = to_vmx(vcpu)->vpid; 2702 2703 if (!vpid_sync_vcpu_addr(vpid, addr)) 2704 vpid_sync_context(vpid); 2705 2706 /* 2707 * If VPIDs are not supported or enabled, then the above is a no-op. 2708 * But we don't really need a TLB flush in that case anyway, because 2709 * each VM entry/exit includes an implicit flush when VPID is 0. 2710 */ 2711 } 2712 2713 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) 2714 { 2715 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; 2716 2717 vcpu->arch.cr0 &= ~cr0_guest_owned_bits; 2718 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; 2719 } 2720 2721 static void vmx_decache_cr3(struct kvm_vcpu *vcpu) 2722 { 2723 if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu))) 2724 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 2725 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); 2726 } 2727 2728 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) 2729 { 2730 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; 2731 2732 vcpu->arch.cr4 &= ~cr4_guest_owned_bits; 2733 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; 2734 } 2735 2736 static void ept_load_pdptrs(struct kvm_vcpu *vcpu) 2737 { 2738 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 2739 2740 if (!test_bit(VCPU_EXREG_PDPTR, 2741 (unsigned long *)&vcpu->arch.regs_dirty)) 2742 return; 2743 2744 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 2745 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); 2746 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); 2747 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); 2748 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); 2749 } 2750 } 2751 2752 void ept_save_pdptrs(struct kvm_vcpu *vcpu) 2753 { 2754 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 2755 2756 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 2757 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); 2758 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 2759 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); 2760 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); 2761 } 2762 2763 __set_bit(VCPU_EXREG_PDPTR, 2764 (unsigned long *)&vcpu->arch.regs_avail); 2765 __set_bit(VCPU_EXREG_PDPTR, 2766 (unsigned long *)&vcpu->arch.regs_dirty); 2767 } 2768 2769 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, 2770 unsigned long cr0, 2771 struct kvm_vcpu *vcpu) 2772 { 2773 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) 2774 vmx_decache_cr3(vcpu); 2775 if (!(cr0 & X86_CR0_PG)) { 2776 /* From paging/starting to nonpaging */ 2777 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 2778 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | 2779 (CPU_BASED_CR3_LOAD_EXITING | 2780 CPU_BASED_CR3_STORE_EXITING)); 2781 vcpu->arch.cr0 = cr0; 2782 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); 2783 } else if (!is_paging(vcpu)) { 2784 /* From nonpaging to paging */ 2785 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 2786 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & 2787 ~(CPU_BASED_CR3_LOAD_EXITING | 2788 CPU_BASED_CR3_STORE_EXITING)); 2789 vcpu->arch.cr0 = cr0; 2790 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); 2791 } 
2792 2793 if (!(cr0 & X86_CR0_WP)) 2794 *hw_cr0 &= ~X86_CR0_WP; 2795 } 2796 2797 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 2798 { 2799 struct vcpu_vmx *vmx = to_vmx(vcpu); 2800 unsigned long hw_cr0; 2801 2802 hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); 2803 if (enable_unrestricted_guest) 2804 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; 2805 else { 2806 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; 2807 2808 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) 2809 enter_pmode(vcpu); 2810 2811 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) 2812 enter_rmode(vcpu); 2813 } 2814 2815 #ifdef CONFIG_X86_64 2816 if (vcpu->arch.efer & EFER_LME) { 2817 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) 2818 enter_lmode(vcpu); 2819 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) 2820 exit_lmode(vcpu); 2821 } 2822 #endif 2823 2824 if (enable_ept && !enable_unrestricted_guest) 2825 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); 2826 2827 vmcs_writel(CR0_READ_SHADOW, cr0); 2828 vmcs_writel(GUEST_CR0, hw_cr0); 2829 vcpu->arch.cr0 = cr0; 2830 2831 /* depends on vcpu->arch.cr0 to be set to a new value */ 2832 vmx->emulation_required = emulation_required(vcpu); 2833 } 2834 2835 static int get_ept_level(struct kvm_vcpu *vcpu) 2836 { 2837 if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48)) 2838 return 5; 2839 return 4; 2840 } 2841 2842 u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa) 2843 { 2844 u64 eptp = VMX_EPTP_MT_WB; 2845 2846 eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; 2847 2848 if (enable_ept_ad_bits && 2849 (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) 2850 eptp |= VMX_EPTP_AD_ENABLE_BIT; 2851 eptp |= (root_hpa & PAGE_MASK); 2852 2853 return eptp; 2854 } 2855 2856 void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 2857 { 2858 struct kvm *kvm = vcpu->kvm; 2859 unsigned long guest_cr3; 2860 u64 eptp; 2861 2862 guest_cr3 = cr3; 2863 if (enable_ept) { 2864 eptp = construct_eptp(vcpu, cr3); 2865 vmcs_write64(EPT_POINTER, eptp); 2866 2867 if (kvm_x86_ops->tlb_remote_flush) { 2868 spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); 2869 to_vmx(vcpu)->ept_pointer = eptp; 2870 to_kvm_vmx(kvm)->ept_pointers_match 2871 = EPT_POINTERS_CHECK; 2872 spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); 2873 } 2874 2875 if (enable_unrestricted_guest || is_paging(vcpu) || 2876 is_guest_mode(vcpu)) 2877 guest_cr3 = kvm_read_cr3(vcpu); 2878 else 2879 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; 2880 ept_load_pdptrs(vcpu); 2881 } 2882 2883 vmcs_writel(GUEST_CR3, guest_cr3); 2884 } 2885 2886 int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 2887 { 2888 /* 2889 * Pass through host's Machine Check Enable value to hw_cr4, which 2890 * is in force while we are in guest mode. Do not let guests control 2891 * this bit, even if host CR4.MCE == 0. 
2892 */ 2893 unsigned long hw_cr4; 2894 2895 hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE); 2896 if (enable_unrestricted_guest) 2897 hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST; 2898 else if (to_vmx(vcpu)->rmode.vm86_active) 2899 hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON; 2900 else 2901 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON; 2902 2903 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) { 2904 if (cr4 & X86_CR4_UMIP) { 2905 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, 2906 SECONDARY_EXEC_DESC); 2907 hw_cr4 &= ~X86_CR4_UMIP; 2908 } else if (!is_guest_mode(vcpu) || 2909 !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) 2910 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, 2911 SECONDARY_EXEC_DESC); 2912 } 2913 2914 if (cr4 & X86_CR4_VMXE) { 2915 /* 2916 * To use VMXON (and later other VMX instructions), a guest 2917 * must first be able to turn on cr4.VMXE (see handle_vmon()). 2918 * So basically the check on whether to allow nested VMX 2919 * is here. We operate under the default treatment of SMM, 2920 * so VMX cannot be enabled under SMM. 2921 */ 2922 if (!nested_vmx_allowed(vcpu) || is_smm(vcpu)) 2923 return 1; 2924 } 2925 2926 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) 2927 return 1; 2928 2929 vcpu->arch.cr4 = cr4; 2930 2931 if (!enable_unrestricted_guest) { 2932 if (enable_ept) { 2933 if (!is_paging(vcpu)) { 2934 hw_cr4 &= ~X86_CR4_PAE; 2935 hw_cr4 |= X86_CR4_PSE; 2936 } else if (!(cr4 & X86_CR4_PAE)) { 2937 hw_cr4 &= ~X86_CR4_PAE; 2938 } 2939 } 2940 2941 /* 2942 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in 2943 * hardware. To emulate this behavior, SMEP/SMAP/PKU needs 2944 * to be manually disabled when guest switches to non-paging 2945 * mode. 2946 * 2947 * If !enable_unrestricted_guest, the CPU is always running 2948 * with CR0.PG=1 and CR4 needs to be modified. 2949 * If enable_unrestricted_guest, the CPU automatically 2950 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. 2951 */ 2952 if (!is_paging(vcpu)) 2953 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); 2954 } 2955 2956 vmcs_writel(CR4_READ_SHADOW, cr4); 2957 vmcs_writel(GUEST_CR4, hw_cr4); 2958 return 0; 2959 } 2960 2961 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 2962 { 2963 struct vcpu_vmx *vmx = to_vmx(vcpu); 2964 u32 ar; 2965 2966 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 2967 *var = vmx->rmode.segs[seg]; 2968 if (seg == VCPU_SREG_TR 2969 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) 2970 return; 2971 var->base = vmx_read_guest_seg_base(vmx, seg); 2972 var->selector = vmx_read_guest_seg_selector(vmx, seg); 2973 return; 2974 } 2975 var->base = vmx_read_guest_seg_base(vmx, seg); 2976 var->limit = vmx_read_guest_seg_limit(vmx, seg); 2977 var->selector = vmx_read_guest_seg_selector(vmx, seg); 2978 ar = vmx_read_guest_seg_ar(vmx, seg); 2979 var->unusable = (ar >> 16) & 1; 2980 var->type = ar & 15; 2981 var->s = (ar >> 4) & 1; 2982 var->dpl = (ar >> 5) & 3; 2983 /* 2984 * Some userspaces do not preserve unusable property. Since usable 2985 * segment has to be present according to VMX spec we can use present 2986 * property to amend userspace bug by making unusable segment always 2987 * nonpresent. vmx_segment_access_rights() already marks nonpresent 2988 * segment as unusable. 
2989 */ 2990 var->present = !var->unusable; 2991 var->avl = (ar >> 12) & 1; 2992 var->l = (ar >> 13) & 1; 2993 var->db = (ar >> 14) & 1; 2994 var->g = (ar >> 15) & 1; 2995 } 2996 2997 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) 2998 { 2999 struct kvm_segment s; 3000 3001 if (to_vmx(vcpu)->rmode.vm86_active) { 3002 vmx_get_segment(vcpu, &s, seg); 3003 return s.base; 3004 } 3005 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); 3006 } 3007 3008 int vmx_get_cpl(struct kvm_vcpu *vcpu) 3009 { 3010 struct vcpu_vmx *vmx = to_vmx(vcpu); 3011 3012 if (unlikely(vmx->rmode.vm86_active)) 3013 return 0; 3014 else { 3015 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); 3016 return VMX_AR_DPL(ar); 3017 } 3018 } 3019 3020 static u32 vmx_segment_access_rights(struct kvm_segment *var) 3021 { 3022 u32 ar; 3023 3024 if (var->unusable || !var->present) 3025 ar = 1 << 16; 3026 else { 3027 ar = var->type & 15; 3028 ar |= (var->s & 1) << 4; 3029 ar |= (var->dpl & 3) << 5; 3030 ar |= (var->present & 1) << 7; 3031 ar |= (var->avl & 1) << 12; 3032 ar |= (var->l & 1) << 13; 3033 ar |= (var->db & 1) << 14; 3034 ar |= (var->g & 1) << 15; 3035 } 3036 3037 return ar; 3038 } 3039 3040 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) 3041 { 3042 struct vcpu_vmx *vmx = to_vmx(vcpu); 3043 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 3044 3045 vmx_segment_cache_clear(vmx); 3046 3047 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { 3048 vmx->rmode.segs[seg] = *var; 3049 if (seg == VCPU_SREG_TR) 3050 vmcs_write16(sf->selector, var->selector); 3051 else if (var->s) 3052 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); 3053 goto out; 3054 } 3055 3056 vmcs_writel(sf->base, var->base); 3057 vmcs_write32(sf->limit, var->limit); 3058 vmcs_write16(sf->selector, var->selector); 3059 3060 /* 3061 * Fix the "Accessed" bit in AR field of segment registers for older 3062 * qemu binaries. 3063 * IA32 arch specifies that at the time of processor reset the 3064 * "Accessed" bit in the AR field of segment registers is 1. And qemu 3065 * is setting it to 0 in the userland code. This causes invalid guest 3066 * state vmexit when "unrestricted guest" mode is turned on. 3067 * Fix for this setup issue in cpu_reset is being pushed in the qemu 3068 * tree. Newer qemu binaries with that qemu fix would not need this 3069 * kvm hack. 
3070 */ 3071 if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) 3072 var->type |= 0x1; /* Accessed */ 3073 3074 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); 3075 3076 out: 3077 vmx->emulation_required = emulation_required(vcpu); 3078 } 3079 3080 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) 3081 { 3082 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); 3083 3084 *db = (ar >> 14) & 1; 3085 *l = (ar >> 13) & 1; 3086 } 3087 3088 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3089 { 3090 dt->size = vmcs_read32(GUEST_IDTR_LIMIT); 3091 dt->address = vmcs_readl(GUEST_IDTR_BASE); 3092 } 3093 3094 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3095 { 3096 vmcs_write32(GUEST_IDTR_LIMIT, dt->size); 3097 vmcs_writel(GUEST_IDTR_BASE, dt->address); 3098 } 3099 3100 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3101 { 3102 dt->size = vmcs_read32(GUEST_GDTR_LIMIT); 3103 dt->address = vmcs_readl(GUEST_GDTR_BASE); 3104 } 3105 3106 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) 3107 { 3108 vmcs_write32(GUEST_GDTR_LIMIT, dt->size); 3109 vmcs_writel(GUEST_GDTR_BASE, dt->address); 3110 } 3111 3112 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) 3113 { 3114 struct kvm_segment var; 3115 u32 ar; 3116 3117 vmx_get_segment(vcpu, &var, seg); 3118 var.dpl = 0x3; 3119 if (seg == VCPU_SREG_CS) 3120 var.type = 0x3; 3121 ar = vmx_segment_access_rights(&var); 3122 3123 if (var.base != (var.selector << 4)) 3124 return false; 3125 if (var.limit != 0xffff) 3126 return false; 3127 if (ar != 0xf3) 3128 return false; 3129 3130 return true; 3131 } 3132 3133 static bool code_segment_valid(struct kvm_vcpu *vcpu) 3134 { 3135 struct kvm_segment cs; 3136 unsigned int cs_rpl; 3137 3138 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 3139 cs_rpl = cs.selector & SEGMENT_RPL_MASK; 3140 3141 if (cs.unusable) 3142 return false; 3143 if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) 3144 return false; 3145 if (!cs.s) 3146 return false; 3147 if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { 3148 if (cs.dpl > cs_rpl) 3149 return false; 3150 } else { 3151 if (cs.dpl != cs_rpl) 3152 return false; 3153 } 3154 if (!cs.present) 3155 return false; 3156 3157 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ 3158 return true; 3159 } 3160 3161 static bool stack_segment_valid(struct kvm_vcpu *vcpu) 3162 { 3163 struct kvm_segment ss; 3164 unsigned int ss_rpl; 3165 3166 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 3167 ss_rpl = ss.selector & SEGMENT_RPL_MASK; 3168 3169 if (ss.unusable) 3170 return true; 3171 if (ss.type != 3 && ss.type != 7) 3172 return false; 3173 if (!ss.s) 3174 return false; 3175 if (ss.dpl != ss_rpl) /* DPL != RPL */ 3176 return false; 3177 if (!ss.present) 3178 return false; 3179 3180 return true; 3181 } 3182 3183 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) 3184 { 3185 struct kvm_segment var; 3186 unsigned int rpl; 3187 3188 vmx_get_segment(vcpu, &var, seg); 3189 rpl = var.selector & SEGMENT_RPL_MASK; 3190 3191 if (var.unusable) 3192 return true; 3193 if (!var.s) 3194 return false; 3195 if (!var.present) 3196 return false; 3197 if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { 3198 if (var.dpl < rpl) /* DPL < RPL */ 3199 return false; 3200 } 3201 3202 /* TODO: Add other members to kvm_segment_field to allow checking for other access 3203 * rights flags 3204 */ 3205 return true; 3206 } 
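/* TR must select a present, busy TSS (type 3 or 11) located in the GDT. */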
3207 3208 static bool tr_valid(struct kvm_vcpu *vcpu) 3209 { 3210 struct kvm_segment tr; 3211 3212 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); 3213 3214 if (tr.unusable) 3215 return false; 3216 if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 3217 return false; 3218 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ 3219 return false; 3220 if (!tr.present) 3221 return false; 3222 3223 return true; 3224 } 3225 3226 static bool ldtr_valid(struct kvm_vcpu *vcpu) 3227 { 3228 struct kvm_segment ldtr; 3229 3230 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); 3231 3232 if (ldtr.unusable) 3233 return true; 3234 if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ 3235 return false; 3236 if (ldtr.type != 2) 3237 return false; 3238 if (!ldtr.present) 3239 return false; 3240 3241 return true; 3242 } 3243 3244 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) 3245 { 3246 struct kvm_segment cs, ss; 3247 3248 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 3249 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 3250 3251 return ((cs.selector & SEGMENT_RPL_MASK) == 3252 (ss.selector & SEGMENT_RPL_MASK)); 3253 } 3254 3255 /* 3256 * Check if guest state is valid. Returns true if valid, false if 3257 * not. 3258 * We assume that registers are always usable 3259 */ 3260 static bool guest_state_valid(struct kvm_vcpu *vcpu) 3261 { 3262 if (enable_unrestricted_guest) 3263 return true; 3264 3265 /* real mode guest state checks */ 3266 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { 3267 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) 3268 return false; 3269 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) 3270 return false; 3271 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) 3272 return false; 3273 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) 3274 return false; 3275 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) 3276 return false; 3277 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) 3278 return false; 3279 } else { 3280 /* protected mode guest state checks */ 3281 if (!cs_ss_rpl_check(vcpu)) 3282 return false; 3283 if (!code_segment_valid(vcpu)) 3284 return false; 3285 if (!stack_segment_valid(vcpu)) 3286 return false; 3287 if (!data_segment_valid(vcpu, VCPU_SREG_DS)) 3288 return false; 3289 if (!data_segment_valid(vcpu, VCPU_SREG_ES)) 3290 return false; 3291 if (!data_segment_valid(vcpu, VCPU_SREG_FS)) 3292 return false; 3293 if (!data_segment_valid(vcpu, VCPU_SREG_GS)) 3294 return false; 3295 if (!tr_valid(vcpu)) 3296 return false; 3297 if (!ldtr_valid(vcpu)) 3298 return false; 3299 } 3300 /* TODO: 3301 * - Add checks on RIP 3302 * - Add checks on RFLAGS 3303 */ 3304 3305 return true; 3306 } 3307 3308 static int init_rmode_tss(struct kvm *kvm) 3309 { 3310 gfn_t fn; 3311 u16 data = 0; 3312 int idx, r; 3313 3314 idx = srcu_read_lock(&kvm->srcu); 3315 fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT; 3316 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); 3317 if (r < 0) 3318 goto out; 3319 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; 3320 r = kvm_write_guest_page(kvm, fn++, &data, 3321 TSS_IOPB_BASE_OFFSET, sizeof(u16)); 3322 if (r < 0) 3323 goto out; 3324 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); 3325 if (r < 0) 3326 goto out; 3327 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); 3328 if (r < 0) 3329 goto out; 3330 data = ~0; 3331 r = kvm_write_guest_page(kvm, fn, &data, 3332 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, 3333 sizeof(u8)); 3334 out: 3335 srcu_read_unlock(&kvm->srcu, idx); 3336 return r; 3337 } 3338 3339 static int init_rmode_identity_map(struct kvm *kvm) 3340 { 3341 struct kvm_vmx 
*kvm_vmx = to_kvm_vmx(kvm); 3342 int i, idx, r = 0; 3343 kvm_pfn_t identity_map_pfn; 3344 u32 tmp; 3345 3346 /* Protect kvm_vmx->ept_identity_pagetable_done. */ 3347 mutex_lock(&kvm->slots_lock); 3348 3349 if (likely(kvm_vmx->ept_identity_pagetable_done)) 3350 goto out2; 3351 3352 if (!kvm_vmx->ept_identity_map_addr) 3353 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; 3354 identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT; 3355 3356 r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 3357 kvm_vmx->ept_identity_map_addr, PAGE_SIZE); 3358 if (r < 0) 3359 goto out2; 3360 3361 idx = srcu_read_lock(&kvm->srcu); 3362 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); 3363 if (r < 0) 3364 goto out; 3365 /* Set up identity-mapping pagetable for EPT in real mode */ 3366 for (i = 0; i < PT32_ENT_PER_PAGE; i++) { 3367 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | 3368 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); 3369 r = kvm_write_guest_page(kvm, identity_map_pfn, 3370 &tmp, i * sizeof(tmp), sizeof(tmp)); 3371 if (r < 0) 3372 goto out; 3373 } 3374 kvm_vmx->ept_identity_pagetable_done = true; 3375 3376 out: 3377 srcu_read_unlock(&kvm->srcu, idx); 3378 3379 out2: 3380 mutex_unlock(&kvm->slots_lock); 3381 return r; 3382 } 3383 3384 static void seg_setup(int seg) 3385 { 3386 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 3387 unsigned int ar; 3388 3389 vmcs_write16(sf->selector, 0); 3390 vmcs_writel(sf->base, 0); 3391 vmcs_write32(sf->limit, 0xffff); 3392 ar = 0x93; 3393 if (seg == VCPU_SREG_CS) 3394 ar |= 0x08; /* code segment */ 3395 3396 vmcs_write32(sf->ar_bytes, ar); 3397 } 3398 3399 static int alloc_apic_access_page(struct kvm *kvm) 3400 { 3401 struct page *page; 3402 int r = 0; 3403 3404 mutex_lock(&kvm->slots_lock); 3405 if (kvm->arch.apic_access_page_done) 3406 goto out; 3407 r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 3408 APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); 3409 if (r) 3410 goto out; 3411 3412 page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); 3413 if (is_error_page(page)) { 3414 r = -EFAULT; 3415 goto out; 3416 } 3417 3418 /* 3419 * Do not pin the page in memory, so that memory hot-unplug 3420 * is able to migrate it. 3421 */ 3422 put_page(page); 3423 kvm->arch.apic_access_page_done = true; 3424 out: 3425 mutex_unlock(&kvm->slots_lock); 3426 return r; 3427 } 3428 3429 int allocate_vpid(void) 3430 { 3431 int vpid; 3432 3433 if (!enable_vpid) 3434 return 0; 3435 spin_lock(&vmx_vpid_lock); 3436 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); 3437 if (vpid < VMX_NR_VPIDS) 3438 __set_bit(vpid, vmx_vpid_bitmap); 3439 else 3440 vpid = 0; 3441 spin_unlock(&vmx_vpid_lock); 3442 return vpid; 3443 } 3444 3445 void free_vpid(int vpid) 3446 { 3447 if (!enable_vpid || vpid == 0) 3448 return; 3449 spin_lock(&vmx_vpid_lock); 3450 __clear_bit(vpid, vmx_vpid_bitmap); 3451 spin_unlock(&vmx_vpid_lock); 3452 } 3453 3454 static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, 3455 u32 msr, int type) 3456 { 3457 int f = sizeof(unsigned long); 3458 3459 if (!cpu_has_vmx_msr_bitmap()) 3460 return; 3461 3462 if (static_branch_unlikely(&enable_evmcs)) 3463 evmcs_touch_msr_bitmap(); 3464 3465 /* 3466 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals 3467 * have the write-low and read-high bitmap offsets the wrong way round. 3468 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
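 * Layout as used below: read-low bitmap at offset 0x000, read-high at 0x400,
 * write-low at 0x800 and write-high at 0xc00, one bit per MSR; a clear bit
 * means the access is not intercepted.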
3469 */ 3470 if (msr <= 0x1fff) { 3471 if (type & MSR_TYPE_R) 3472 /* read-low */ 3473 __clear_bit(msr, msr_bitmap + 0x000 / f); 3474 3475 if (type & MSR_TYPE_W) 3476 /* write-low */ 3477 __clear_bit(msr, msr_bitmap + 0x800 / f); 3478 3479 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 3480 msr &= 0x1fff; 3481 if (type & MSR_TYPE_R) 3482 /* read-high */ 3483 __clear_bit(msr, msr_bitmap + 0x400 / f); 3484 3485 if (type & MSR_TYPE_W) 3486 /* write-high */ 3487 __clear_bit(msr, msr_bitmap + 0xc00 / f); 3488 3489 } 3490 } 3491 3492 static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, 3493 u32 msr, int type) 3494 { 3495 int f = sizeof(unsigned long); 3496 3497 if (!cpu_has_vmx_msr_bitmap()) 3498 return; 3499 3500 if (static_branch_unlikely(&enable_evmcs)) 3501 evmcs_touch_msr_bitmap(); 3502 3503 /* 3504 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals 3505 * have the write-low and read-high bitmap offsets the wrong way round. 3506 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 3507 */ 3508 if (msr <= 0x1fff) { 3509 if (type & MSR_TYPE_R) 3510 /* read-low */ 3511 __set_bit(msr, msr_bitmap + 0x000 / f); 3512 3513 if (type & MSR_TYPE_W) 3514 /* write-low */ 3515 __set_bit(msr, msr_bitmap + 0x800 / f); 3516 3517 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 3518 msr &= 0x1fff; 3519 if (type & MSR_TYPE_R) 3520 /* read-high */ 3521 __set_bit(msr, msr_bitmap + 0x400 / f); 3522 3523 if (type & MSR_TYPE_W) 3524 /* write-high */ 3525 __set_bit(msr, msr_bitmap + 0xc00 / f); 3526 3527 } 3528 } 3529 3530 static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap, 3531 u32 msr, int type, bool value) 3532 { 3533 if (value) 3534 vmx_enable_intercept_for_msr(msr_bitmap, msr, type); 3535 else 3536 vmx_disable_intercept_for_msr(msr_bitmap, msr, type); 3537 } 3538 3539 static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu) 3540 { 3541 u8 mode = 0; 3542 3543 if (cpu_has_secondary_exec_ctrls() && 3544 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & 3545 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { 3546 mode |= MSR_BITMAP_MODE_X2APIC; 3547 if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) 3548 mode |= MSR_BITMAP_MODE_X2APIC_APICV; 3549 } 3550 3551 return mode; 3552 } 3553 3554 static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap, 3555 u8 mode) 3556 { 3557 int msr; 3558 3559 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 3560 unsigned word = msr / BITS_PER_LONG; 3561 msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0; 3562 msr_bitmap[word + (0x800 / sizeof(long))] = ~0; 3563 } 3564 3565 if (mode & MSR_BITMAP_MODE_X2APIC) { 3566 /* 3567 * TPR reads and writes can be virtualized even if virtual interrupt 3568 * delivery is not in use. 
3569 */ 3570 vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW); 3571 if (mode & MSR_BITMAP_MODE_X2APIC_APICV) { 3572 vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R); 3573 vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W); 3574 vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W); 3575 } 3576 } 3577 } 3578 3579 void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu) 3580 { 3581 struct vcpu_vmx *vmx = to_vmx(vcpu); 3582 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; 3583 u8 mode = vmx_msr_bitmap_mode(vcpu); 3584 u8 changed = mode ^ vmx->msr_bitmap_mode; 3585 3586 if (!changed) 3587 return; 3588 3589 if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV)) 3590 vmx_update_msr_bitmap_x2apic(msr_bitmap, mode); 3591 3592 vmx->msr_bitmap_mode = mode; 3593 } 3594 3595 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx) 3596 { 3597 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; 3598 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); 3599 u32 i; 3600 3601 vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_STATUS, 3602 MSR_TYPE_RW, flag); 3603 vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_BASE, 3604 MSR_TYPE_RW, flag); 3605 vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_MASK, 3606 MSR_TYPE_RW, flag); 3607 vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_CR3_MATCH, 3608 MSR_TYPE_RW, flag); 3609 for (i = 0; i < vmx->pt_desc.addr_range; i++) { 3610 vmx_set_intercept_for_msr(msr_bitmap, 3611 MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag); 3612 vmx_set_intercept_for_msr(msr_bitmap, 3613 MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag); 3614 } 3615 } 3616 3617 static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu) 3618 { 3619 return enable_apicv; 3620 } 3621 3622 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) 3623 { 3624 struct vcpu_vmx *vmx = to_vmx(vcpu); 3625 void *vapic_page; 3626 u32 vppr; 3627 int rvi; 3628 3629 if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || 3630 !nested_cpu_has_vid(get_vmcs12(vcpu)) || 3631 WARN_ON_ONCE(!vmx->nested.virtual_apic_page)) 3632 return false; 3633 3634 rvi = vmx_get_rvi(); 3635 3636 vapic_page = kmap(vmx->nested.virtual_apic_page); 3637 vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); 3638 kunmap(vmx->nested.virtual_apic_page); 3639 3640 return ((rvi & 0xf0) > (vppr & 0xf0)); 3641 } 3642 3643 static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, 3644 bool nested) 3645 { 3646 #ifdef CONFIG_SMP 3647 int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; 3648 3649 if (vcpu->mode == IN_GUEST_MODE) { 3650 /* 3651 * The vector of interrupt to be delivered to vcpu had 3652 * been set in PIR before this function. 3653 * 3654 * Following cases will be reached in this block, and 3655 * we always send a notification event in all cases as 3656 * explained below. 3657 * 3658 * Case 1: vcpu keeps in non-root mode. Sending a 3659 * notification event posts the interrupt to vcpu. 3660 * 3661 * Case 2: vcpu exits to root mode and is still 3662 * runnable. PIR will be synced to vIRR before the 3663 * next vcpu entry. Sending a notification event in 3664 * this case has no effect, as vcpu is not in root 3665 * mode. 3666 * 3667 * Case 3: vcpu exits to root mode and is blocked. 3668 * vcpu_block() has already synced PIR to vIRR and 3669 * never blocks vcpu if vIRR is not cleared. 
Therefore, 3670 * a blocked vcpu here does not wait for any requested 3671 * interrupts in PIR, and sending a notification event 3672 * which has no effect is safe here. 3673 */ 3674 3675 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); 3676 return true; 3677 } 3678 #endif 3679 return false; 3680 } 3681 3682 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, 3683 int vector) 3684 { 3685 struct vcpu_vmx *vmx = to_vmx(vcpu); 3686 3687 if (is_guest_mode(vcpu) && 3688 vector == vmx->nested.posted_intr_nv) { 3689 /* 3690 * If a posted intr is not recognized by hardware, 3691 * we will accomplish it in the next vmentry. 3692 */ 3693 vmx->nested.pi_pending = true; 3694 kvm_make_request(KVM_REQ_EVENT, vcpu); 3695 /* the PIR and ON have been set by L1. */ 3696 if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) 3697 kvm_vcpu_kick(vcpu); 3698 return 0; 3699 } 3700 return -1; 3701 } 3702 /* 3703 * Send interrupt to vcpu via posted interrupt way. 3704 * 1. If target vcpu is running(non-root mode), send posted interrupt 3705 * notification to vcpu and hardware will sync PIR to vIRR atomically. 3706 * 2. If target vcpu isn't running(root mode), kick it to pick up the 3707 * interrupt from PIR in next vmentry. 3708 */ 3709 static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) 3710 { 3711 struct vcpu_vmx *vmx = to_vmx(vcpu); 3712 int r; 3713 3714 r = vmx_deliver_nested_posted_interrupt(vcpu, vector); 3715 if (!r) 3716 return; 3717 3718 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) 3719 return; 3720 3721 /* If a previous notification has sent the IPI, nothing to do. */ 3722 if (pi_test_and_set_on(&vmx->pi_desc)) 3723 return; 3724 3725 if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) 3726 kvm_vcpu_kick(vcpu); 3727 } 3728 3729 /* 3730 * Set up the vmcs's constant host-state fields, i.e., host-state fields that 3731 * will not change in the lifetime of the guest. 3732 * Note that host-state that does change is set elsewhere. E.g., host-state 3733 * that is set differently for each CPU is set in vmx_vcpu_load(), not here. 3734 */ 3735 void vmx_set_constant_host_state(struct vcpu_vmx *vmx) 3736 { 3737 u32 low32, high32; 3738 unsigned long tmpl; 3739 struct desc_ptr dt; 3740 unsigned long cr0, cr3, cr4; 3741 3742 cr0 = read_cr0(); 3743 WARN_ON(cr0 & X86_CR0_TS); 3744 vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */ 3745 3746 /* 3747 * Save the most likely value for this task's CR3 in the VMCS. 3748 * We can't use __get_current_cr3_fast() because we're not atomic. 3749 */ 3750 cr3 = __read_cr3(); 3751 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ 3752 vmx->loaded_vmcs->host_state.cr3 = cr3; 3753 3754 /* Save the most likely value for this task's CR4 in the VMCS. */ 3755 cr4 = cr4_read_shadow(); 3756 vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ 3757 vmx->loaded_vmcs->host_state.cr4 = cr4; 3758 3759 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ 3760 #ifdef CONFIG_X86_64 3761 /* 3762 * Load null selectors, so we can avoid reloading them in 3763 * vmx_prepare_switch_to_host(), in case userspace uses 3764 * the null selectors too (the expected case). 
3765 */ 3766 vmcs_write16(HOST_DS_SELECTOR, 0); 3767 vmcs_write16(HOST_ES_SELECTOR, 0); 3768 #else 3769 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 3770 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 3771 #endif 3772 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 3773 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ 3774 3775 store_idt(&dt); 3776 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ 3777 vmx->host_idt_base = dt.address; 3778 3779 vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */ 3780 3781 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); 3782 vmcs_write32(HOST_IA32_SYSENTER_CS, low32); 3783 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); 3784 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ 3785 3786 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { 3787 rdmsr(MSR_IA32_CR_PAT, low32, high32); 3788 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); 3789 } 3790 3791 if (cpu_has_load_ia32_efer()) 3792 vmcs_write64(HOST_IA32_EFER, host_efer); 3793 } 3794 3795 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) 3796 { 3797 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; 3798 if (enable_ept) 3799 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; 3800 if (is_guest_mode(&vmx->vcpu)) 3801 vmx->vcpu.arch.cr4_guest_owned_bits &= 3802 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; 3803 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); 3804 } 3805 3806 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) 3807 { 3808 u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; 3809 3810 if (!kvm_vcpu_apicv_active(&vmx->vcpu)) 3811 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; 3812 3813 if (!enable_vnmi) 3814 pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS; 3815 3816 /* Enable the preemption timer dynamically */ 3817 pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 3818 return pin_based_exec_ctrl; 3819 } 3820 3821 static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) 3822 { 3823 struct vcpu_vmx *vmx = to_vmx(vcpu); 3824 3825 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); 3826 if (cpu_has_secondary_exec_ctrls()) { 3827 if (kvm_vcpu_apicv_active(vcpu)) 3828 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, 3829 SECONDARY_EXEC_APIC_REGISTER_VIRT | 3830 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 3831 else 3832 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, 3833 SECONDARY_EXEC_APIC_REGISTER_VIRT | 3834 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 3835 } 3836 3837 if (cpu_has_vmx_msr_bitmap()) 3838 vmx_update_msr_bitmap(vcpu); 3839 } 3840 3841 u32 vmx_exec_control(struct vcpu_vmx *vmx) 3842 { 3843 u32 exec_control = vmcs_config.cpu_based_exec_ctrl; 3844 3845 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) 3846 exec_control &= ~CPU_BASED_MOV_DR_EXITING; 3847 3848 if (!cpu_need_tpr_shadow(&vmx->vcpu)) { 3849 exec_control &= ~CPU_BASED_TPR_SHADOW; 3850 #ifdef CONFIG_X86_64 3851 exec_control |= CPU_BASED_CR8_STORE_EXITING | 3852 CPU_BASED_CR8_LOAD_EXITING; 3853 #endif 3854 } 3855 if (!enable_ept) 3856 exec_control |= CPU_BASED_CR3_STORE_EXITING | 3857 CPU_BASED_CR3_LOAD_EXITING | 3858 CPU_BASED_INVLPG_EXITING; 3859 if (kvm_mwait_in_guest(vmx->vcpu.kvm)) 3860 exec_control &= ~(CPU_BASED_MWAIT_EXITING | 3861 CPU_BASED_MONITOR_EXITING); 3862 if (kvm_hlt_in_guest(vmx->vcpu.kvm)) 3863 exec_control &= ~CPU_BASED_HLT_EXITING; 3864 return exec_control; 3865 } 3866 3867 3868 static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) 3869 { 3870 struct kvm_vcpu 
*vcpu = &vmx->vcpu; 3871 3872 u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; 3873 3874 if (pt_mode == PT_MODE_SYSTEM) 3875 exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX); 3876 if (!cpu_need_virtualize_apic_accesses(vcpu)) 3877 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 3878 if (vmx->vpid == 0) 3879 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; 3880 if (!enable_ept) { 3881 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; 3882 enable_unrestricted_guest = 0; 3883 } 3884 if (!enable_unrestricted_guest) 3885 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 3886 if (kvm_pause_in_guest(vmx->vcpu.kvm)) 3887 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; 3888 if (!kvm_vcpu_apicv_active(vcpu)) 3889 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | 3890 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); 3891 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 3892 3893 /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP, 3894 * in vmx_set_cr4. */ 3895 exec_control &= ~SECONDARY_EXEC_DESC; 3896 3897 /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD 3898 (handle_vmptrld). 3899 We can NOT enable shadow_vmcs here because we don't have yet 3900 a current VMCS12 3901 */ 3902 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 3903 3904 if (!enable_pml) 3905 exec_control &= ~SECONDARY_EXEC_ENABLE_PML; 3906 3907 if (vmx_xsaves_supported()) { 3908 /* Exposing XSAVES only when XSAVE is exposed */ 3909 bool xsaves_enabled = 3910 guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && 3911 guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); 3912 3913 if (!xsaves_enabled) 3914 exec_control &= ~SECONDARY_EXEC_XSAVES; 3915 3916 if (nested) { 3917 if (xsaves_enabled) 3918 vmx->nested.msrs.secondary_ctls_high |= 3919 SECONDARY_EXEC_XSAVES; 3920 else 3921 vmx->nested.msrs.secondary_ctls_high &= 3922 ~SECONDARY_EXEC_XSAVES; 3923 } 3924 } 3925 3926 if (vmx_rdtscp_supported()) { 3927 bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP); 3928 if (!rdtscp_enabled) 3929 exec_control &= ~SECONDARY_EXEC_RDTSCP; 3930 3931 if (nested) { 3932 if (rdtscp_enabled) 3933 vmx->nested.msrs.secondary_ctls_high |= 3934 SECONDARY_EXEC_RDTSCP; 3935 else 3936 vmx->nested.msrs.secondary_ctls_high &= 3937 ~SECONDARY_EXEC_RDTSCP; 3938 } 3939 } 3940 3941 if (vmx_invpcid_supported()) { 3942 /* Exposing INVPCID only when PCID is exposed */ 3943 bool invpcid_enabled = 3944 guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) && 3945 guest_cpuid_has(vcpu, X86_FEATURE_PCID); 3946 3947 if (!invpcid_enabled) { 3948 exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; 3949 guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID); 3950 } 3951 3952 if (nested) { 3953 if (invpcid_enabled) 3954 vmx->nested.msrs.secondary_ctls_high |= 3955 SECONDARY_EXEC_ENABLE_INVPCID; 3956 else 3957 vmx->nested.msrs.secondary_ctls_high &= 3958 ~SECONDARY_EXEC_ENABLE_INVPCID; 3959 } 3960 } 3961 3962 if (vmx_rdrand_supported()) { 3963 bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND); 3964 if (rdrand_enabled) 3965 exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING; 3966 3967 if (nested) { 3968 if (rdrand_enabled) 3969 vmx->nested.msrs.secondary_ctls_high |= 3970 SECONDARY_EXEC_RDRAND_EXITING; 3971 else 3972 vmx->nested.msrs.secondary_ctls_high &= 3973 ~SECONDARY_EXEC_RDRAND_EXITING; 3974 } 3975 } 3976 3977 if (vmx_rdseed_supported()) { 3978 bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED); 3979 if (rdseed_enabled) 3980 exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING; 3981 3982 if (nested) { 3983 if 
(rdseed_enabled) 3984 vmx->nested.msrs.secondary_ctls_high |= 3985 SECONDARY_EXEC_RDSEED_EXITING; 3986 else 3987 vmx->nested.msrs.secondary_ctls_high &= 3988 ~SECONDARY_EXEC_RDSEED_EXITING; 3989 } 3990 } 3991 3992 vmx->secondary_exec_control = exec_control; 3993 } 3994 3995 static void ept_set_mmio_spte_mask(void) 3996 { 3997 /* 3998 * EPT Misconfigurations can be generated if the value of bits 2:0 3999 * of an EPT paging-structure entry is 110b (write/execute). 4000 */ 4001 kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK, 4002 VMX_EPT_MISCONFIG_WX_VALUE); 4003 } 4004 4005 #define VMX_XSS_EXIT_BITMAP 0 4006 4007 /* 4008 * Sets up the vmcs for emulated real mode. 4009 */ 4010 static void vmx_vcpu_setup(struct vcpu_vmx *vmx) 4011 { 4012 int i; 4013 4014 if (nested) 4015 nested_vmx_vcpu_setup(); 4016 4017 if (cpu_has_vmx_msr_bitmap()) 4018 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); 4019 4020 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ 4021 4022 /* Control */ 4023 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); 4024 vmx->hv_deadline_tsc = -1; 4025 4026 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); 4027 4028 if (cpu_has_secondary_exec_ctrls()) { 4029 vmx_compute_secondary_exec_control(vmx); 4030 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, 4031 vmx->secondary_exec_control); 4032 } 4033 4034 if (kvm_vcpu_apicv_active(&vmx->vcpu)) { 4035 vmcs_write64(EOI_EXIT_BITMAP0, 0); 4036 vmcs_write64(EOI_EXIT_BITMAP1, 0); 4037 vmcs_write64(EOI_EXIT_BITMAP2, 0); 4038 vmcs_write64(EOI_EXIT_BITMAP3, 0); 4039 4040 vmcs_write16(GUEST_INTR_STATUS, 0); 4041 4042 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); 4043 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); 4044 } 4045 4046 if (!kvm_pause_in_guest(vmx->vcpu.kvm)) { 4047 vmcs_write32(PLE_GAP, ple_gap); 4048 vmx->ple_window = ple_window; 4049 vmx->ple_window_dirty = true; 4050 } 4051 4052 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 4053 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 4054 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ 4055 4056 vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ 4057 vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ 4058 vmx_set_constant_host_state(vmx); 4059 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ 4060 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ 4061 4062 if (cpu_has_vmx_vmfunc()) 4063 vmcs_write64(VM_FUNCTION_CONTROL, 0); 4064 4065 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); 4066 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 4067 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 4068 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 4069 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 4070 4071 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) 4072 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 4073 4074 for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { 4075 u32 index = vmx_msr_index[i]; 4076 u32 data_low, data_high; 4077 int j = vmx->nmsrs; 4078 4079 if (rdmsr_safe(index, &data_low, &data_high) < 0) 4080 continue; 4081 if (wrmsr_safe(index, data_low, data_high) < 0) 4082 continue; 4083 vmx->guest_msrs[j].index = i; 4084 vmx->guest_msrs[j].data = 0; 4085 vmx->guest_msrs[j].mask = -1ull; 4086 ++vmx->nmsrs; 4087 } 4088 4089 vmx->arch_capabilities = kvm_get_arch_capabilities(); 4090 4091 vm_exit_controls_init(vmx, vmx_vmexit_ctrl()); 4092 4093 /* 22.2.1, 20.8.1 */ 4094 vm_entry_controls_init(vmx, vmx_vmentry_ctrl()); 4095 4096 vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS; 4097 vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS); 
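	/*
	 * In the CR0/CR4 guest/host masks a set bit is host-owned: guest
	 * reads of that bit come from the read shadow and guest attempts to
	 * change it cause a VM exit.  ~X86_CR0_TS above therefore leaves
	 * only CR0.TS guest-owned, and set_cr4_guest_host_mask() below
	 * computes the analogous mask for CR4.
	 */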
4098 4099 set_cr4_guest_host_mask(vmx); 4100 4101 if (vmx_xsaves_supported()) 4102 vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); 4103 4104 if (enable_pml) { 4105 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); 4106 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 4107 } 4108 4109 if (cpu_has_vmx_encls_vmexit()) 4110 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); 4111 4112 if (pt_mode == PT_MODE_HOST_GUEST) { 4113 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); 4114 /* Bit[6~0] are forced to 1, writes are ignored. */ 4115 vmx->pt_desc.guest.output_mask = 0x7F; 4116 vmcs_write64(GUEST_IA32_RTIT_CTL, 0); 4117 } 4118 } 4119 4120 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 4121 { 4122 struct vcpu_vmx *vmx = to_vmx(vcpu); 4123 struct msr_data apic_base_msr; 4124 u64 cr0; 4125 4126 vmx->rmode.vm86_active = 0; 4127 vmx->spec_ctrl = 0; 4128 4129 vcpu->arch.microcode_version = 0x100000000ULL; 4130 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); 4131 kvm_set_cr8(vcpu, 0); 4132 4133 if (!init_event) { 4134 apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | 4135 MSR_IA32_APICBASE_ENABLE; 4136 if (kvm_vcpu_is_reset_bsp(vcpu)) 4137 apic_base_msr.data |= MSR_IA32_APICBASE_BSP; 4138 apic_base_msr.host_initiated = true; 4139 kvm_set_apic_base(vcpu, &apic_base_msr); 4140 } 4141 4142 vmx_segment_cache_clear(vmx); 4143 4144 seg_setup(VCPU_SREG_CS); 4145 vmcs_write16(GUEST_CS_SELECTOR, 0xf000); 4146 vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); 4147 4148 seg_setup(VCPU_SREG_DS); 4149 seg_setup(VCPU_SREG_ES); 4150 seg_setup(VCPU_SREG_FS); 4151 seg_setup(VCPU_SREG_GS); 4152 seg_setup(VCPU_SREG_SS); 4153 4154 vmcs_write16(GUEST_TR_SELECTOR, 0); 4155 vmcs_writel(GUEST_TR_BASE, 0); 4156 vmcs_write32(GUEST_TR_LIMIT, 0xffff); 4157 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 4158 4159 vmcs_write16(GUEST_LDTR_SELECTOR, 0); 4160 vmcs_writel(GUEST_LDTR_BASE, 0); 4161 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); 4162 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); 4163 4164 if (!init_event) { 4165 vmcs_write32(GUEST_SYSENTER_CS, 0); 4166 vmcs_writel(GUEST_SYSENTER_ESP, 0); 4167 vmcs_writel(GUEST_SYSENTER_EIP, 0); 4168 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4169 } 4170 4171 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 4172 kvm_rip_write(vcpu, 0xfff0); 4173 4174 vmcs_writel(GUEST_GDTR_BASE, 0); 4175 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); 4176 4177 vmcs_writel(GUEST_IDTR_BASE, 0); 4178 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); 4179 4180 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); 4181 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); 4182 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); 4183 if (kvm_mpx_supported()) 4184 vmcs_write64(GUEST_BNDCFGS, 0); 4185 4186 setup_msrs(vmx); 4187 4188 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ 4189 4190 if (cpu_has_vmx_tpr_shadow() && !init_event) { 4191 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); 4192 if (cpu_need_tpr_shadow(vcpu)) 4193 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 4194 __pa(vcpu->arch.apic->regs)); 4195 vmcs_write32(TPR_THRESHOLD, 0); 4196 } 4197 4198 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 4199 4200 if (vmx->vpid != 0) 4201 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 4202 4203 cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; 4204 vmx->vcpu.arch.cr0 = cr0; 4205 vmx_set_cr0(vcpu, cr0); /* enter rmode */ 4206 vmx_set_cr4(vcpu, 0); 4207 vmx_set_efer(vcpu, 0); 4208 4209 update_exception_bitmap(vcpu); 4210 4211 vpid_sync_context(vmx->vpid); 4212 if (init_event) 4213 vmx_clear_hlt(vcpu); 4214 } 4215 4216 static void enable_irq_window(struct 
kvm_vcpu *vcpu) 4217 { 4218 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, 4219 CPU_BASED_VIRTUAL_INTR_PENDING); 4220 } 4221 4222 static void enable_nmi_window(struct kvm_vcpu *vcpu) 4223 { 4224 if (!enable_vnmi || 4225 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { 4226 enable_irq_window(vcpu); 4227 return; 4228 } 4229 4230 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, 4231 CPU_BASED_VIRTUAL_NMI_PENDING); 4232 } 4233 4234 static void vmx_inject_irq(struct kvm_vcpu *vcpu) 4235 { 4236 struct vcpu_vmx *vmx = to_vmx(vcpu); 4237 uint32_t intr; 4238 int irq = vcpu->arch.interrupt.nr; 4239 4240 trace_kvm_inj_virq(irq); 4241 4242 ++vcpu->stat.irq_injections; 4243 if (vmx->rmode.vm86_active) { 4244 int inc_eip = 0; 4245 if (vcpu->arch.interrupt.soft) 4246 inc_eip = vcpu->arch.event_exit_inst_len; 4247 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE) 4248 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 4249 return; 4250 } 4251 intr = irq | INTR_INFO_VALID_MASK; 4252 if (vcpu->arch.interrupt.soft) { 4253 intr |= INTR_TYPE_SOFT_INTR; 4254 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 4255 vmx->vcpu.arch.event_exit_inst_len); 4256 } else 4257 intr |= INTR_TYPE_EXT_INTR; 4258 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); 4259 4260 vmx_clear_hlt(vcpu); 4261 } 4262 4263 static void vmx_inject_nmi(struct kvm_vcpu *vcpu) 4264 { 4265 struct vcpu_vmx *vmx = to_vmx(vcpu); 4266 4267 if (!enable_vnmi) { 4268 /* 4269 * Tracking the NMI-blocked state in software is built upon 4270 * finding the next open IRQ window. This, in turn, depends on 4271 * well-behaving guests: They have to keep IRQs disabled at 4272 * least as long as the NMI handler runs. Otherwise we may 4273 * cause NMI nesting, maybe breaking the guest. But as this is 4274 * highly unlikely, we can live with the residual risk. 
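 * vnmi_blocked_time, reset below, tracks how long the vCPU has been kept
 * in this software-blocked state so that the exit path can eventually
 * break out of it should the guest never open an IRQ window.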
4275 */ 4276 vmx->loaded_vmcs->soft_vnmi_blocked = 1; 4277 vmx->loaded_vmcs->vnmi_blocked_time = 0; 4278 } 4279 4280 ++vcpu->stat.nmi_injections; 4281 vmx->loaded_vmcs->nmi_known_unmasked = false; 4282 4283 if (vmx->rmode.vm86_active) { 4284 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) 4285 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 4286 return; 4287 } 4288 4289 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 4290 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); 4291 4292 vmx_clear_hlt(vcpu); 4293 } 4294 4295 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) 4296 { 4297 struct vcpu_vmx *vmx = to_vmx(vcpu); 4298 bool masked; 4299 4300 if (!enable_vnmi) 4301 return vmx->loaded_vmcs->soft_vnmi_blocked; 4302 if (vmx->loaded_vmcs->nmi_known_unmasked) 4303 return false; 4304 masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; 4305 vmx->loaded_vmcs->nmi_known_unmasked = !masked; 4306 return masked; 4307 } 4308 4309 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) 4310 { 4311 struct vcpu_vmx *vmx = to_vmx(vcpu); 4312 4313 if (!enable_vnmi) { 4314 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { 4315 vmx->loaded_vmcs->soft_vnmi_blocked = masked; 4316 vmx->loaded_vmcs->vnmi_blocked_time = 0; 4317 } 4318 } else { 4319 vmx->loaded_vmcs->nmi_known_unmasked = !masked; 4320 if (masked) 4321 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 4322 GUEST_INTR_STATE_NMI); 4323 else 4324 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, 4325 GUEST_INTR_STATE_NMI); 4326 } 4327 } 4328 4329 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) 4330 { 4331 if (to_vmx(vcpu)->nested.nested_run_pending) 4332 return 0; 4333 4334 if (!enable_vnmi && 4335 to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) 4336 return 0; 4337 4338 return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 4339 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI 4340 | GUEST_INTR_STATE_NMI)); 4341 } 4342 4343 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) 4344 { 4345 return (!to_vmx(vcpu)->nested.nested_run_pending && 4346 vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && 4347 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 4348 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); 4349 } 4350 4351 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) 4352 { 4353 int ret; 4354 4355 if (enable_unrestricted_guest) 4356 return 0; 4357 4358 ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, 4359 PAGE_SIZE * 3); 4360 if (ret) 4361 return ret; 4362 to_kvm_vmx(kvm)->tss_addr = addr; 4363 return init_rmode_tss(kvm); 4364 } 4365 4366 static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) 4367 { 4368 to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr; 4369 return 0; 4370 } 4371 4372 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) 4373 { 4374 switch (vec) { 4375 case BP_VECTOR: 4376 /* 4377 * Update instruction length as we may reinject the exception 4378 * from user space while in guest debugging mode. 
4379 */ 4380 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = 4381 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4382 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 4383 return false; 4384 /* fall through */ 4385 case DB_VECTOR: 4386 if (vcpu->guest_debug & 4387 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 4388 return false; 4389 /* fall through */ 4390 case DE_VECTOR: 4391 case OF_VECTOR: 4392 case BR_VECTOR: 4393 case UD_VECTOR: 4394 case DF_VECTOR: 4395 case SS_VECTOR: 4396 case GP_VECTOR: 4397 case MF_VECTOR: 4398 return true; 4399 break; 4400 } 4401 return false; 4402 } 4403 4404 static int handle_rmode_exception(struct kvm_vcpu *vcpu, 4405 int vec, u32 err_code) 4406 { 4407 /* 4408 * Instruction with address size override prefix opcode 0x67 4409 * Cause the #SS fault with 0 error code in VM86 mode. 4410 */ 4411 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { 4412 if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) { 4413 if (vcpu->arch.halt_request) { 4414 vcpu->arch.halt_request = 0; 4415 return kvm_vcpu_halt(vcpu); 4416 } 4417 return 1; 4418 } 4419 return 0; 4420 } 4421 4422 /* 4423 * Forward all other exceptions that are valid in real mode. 4424 * FIXME: Breaks guest debugging in real mode, needs to be fixed with 4425 * the required debugging infrastructure rework. 4426 */ 4427 kvm_queue_exception(vcpu, vec); 4428 return 1; 4429 } 4430 4431 /* 4432 * Trigger machine check on the host. We assume all the MSRs are already set up 4433 * by the CPU and that we still run on the same CPU as the MCE occurred on. 4434 * We pass a fake environment to the machine check handler because we want 4435 * the guest to be always treated like user space, no matter what context 4436 * it used internally. 4437 */ 4438 static void kvm_machine_check(void) 4439 { 4440 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) 4441 struct pt_regs regs = { 4442 .cs = 3, /* Fake ring 3 no matter what the guest ran on */ 4443 .flags = X86_EFLAGS_IF, 4444 }; 4445 4446 do_machine_check(&regs, 0); 4447 #endif 4448 } 4449 4450 static int handle_machine_check(struct kvm_vcpu *vcpu) 4451 { 4452 /* already handled by vcpu_run */ 4453 return 1; 4454 } 4455 4456 static int handle_exception(struct kvm_vcpu *vcpu) 4457 { 4458 struct vcpu_vmx *vmx = to_vmx(vcpu); 4459 struct kvm_run *kvm_run = vcpu->run; 4460 u32 intr_info, ex_no, error_code; 4461 unsigned long cr2, rip, dr6; 4462 u32 vect_info; 4463 enum emulation_result er; 4464 4465 vect_info = vmx->idt_vectoring_info; 4466 intr_info = vmx->exit_intr_info; 4467 4468 if (is_machine_check(intr_info)) 4469 return handle_machine_check(vcpu); 4470 4471 if (is_nmi(intr_info)) 4472 return 1; /* already handled by vmx_vcpu_run() */ 4473 4474 if (is_invalid_opcode(intr_info)) 4475 return handle_ud(vcpu); 4476 4477 error_code = 0; 4478 if (intr_info & INTR_INFO_DELIVER_CODE_MASK) 4479 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 4480 4481 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { 4482 WARN_ON_ONCE(!enable_vmware_backdoor); 4483 er = kvm_emulate_instruction(vcpu, 4484 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); 4485 if (er == EMULATE_USER_EXIT) 4486 return 0; 4487 else if (er != EMULATE_DONE) 4488 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); 4489 return 1; 4490 } 4491 4492 /* 4493 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing 4494 * MMIO, it is better to report an internal error. 4495 * See the comments in vmx_handle_exit.
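 * (A valid vect_info means this exception was raised while another event
 * was still being delivered; other than an ordinary guest page fault,
 * KVM cannot emulate that combination and reports it to userspace as
 * KVM_INTERNAL_ERROR_SIMUL_EX below.)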
4496 */ 4497 if ((vect_info & VECTORING_INFO_VALID_MASK) && 4498 !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { 4499 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 4500 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; 4501 vcpu->run->internal.ndata = 3; 4502 vcpu->run->internal.data[0] = vect_info; 4503 vcpu->run->internal.data[1] = intr_info; 4504 vcpu->run->internal.data[2] = error_code; 4505 return 0; 4506 } 4507 4508 if (is_page_fault(intr_info)) { 4509 cr2 = vmcs_readl(EXIT_QUALIFICATION); 4510 /* EPT won't cause page fault directly */ 4511 WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept); 4512 return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0); 4513 } 4514 4515 ex_no = intr_info & INTR_INFO_VECTOR_MASK; 4516 4517 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) 4518 return handle_rmode_exception(vcpu, ex_no, error_code); 4519 4520 switch (ex_no) { 4521 case AC_VECTOR: 4522 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); 4523 return 1; 4524 case DB_VECTOR: 4525 dr6 = vmcs_readl(EXIT_QUALIFICATION); 4526 if (!(vcpu->guest_debug & 4527 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { 4528 vcpu->arch.dr6 &= ~15; 4529 vcpu->arch.dr6 |= dr6 | DR6_RTM; 4530 if (is_icebp(intr_info)) 4531 skip_emulated_instruction(vcpu); 4532 4533 kvm_queue_exception(vcpu, DB_VECTOR); 4534 return 1; 4535 } 4536 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; 4537 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); 4538 /* fall through */ 4539 case BP_VECTOR: 4540 /* 4541 * Update instruction length as we may reinject #BP from 4542 * user space while in guest debugging mode. Reading it for 4543 * #DB as well causes no harm, it is not used in that case. 4544 */ 4545 vmx->vcpu.arch.event_exit_inst_len = 4546 vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4547 kvm_run->exit_reason = KVM_EXIT_DEBUG; 4548 rip = kvm_rip_read(vcpu); 4549 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; 4550 kvm_run->debug.arch.exception = ex_no; 4551 break; 4552 default: 4553 kvm_run->exit_reason = KVM_EXIT_EXCEPTION; 4554 kvm_run->ex.exception = ex_no; 4555 kvm_run->ex.error_code = error_code; 4556 break; 4557 } 4558 return 0; 4559 } 4560 4561 static int handle_external_interrupt(struct kvm_vcpu *vcpu) 4562 { 4563 ++vcpu->stat.irq_exits; 4564 return 1; 4565 } 4566 4567 static int handle_triple_fault(struct kvm_vcpu *vcpu) 4568 { 4569 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 4570 vcpu->mmio_needed = 0; 4571 return 0; 4572 } 4573 4574 static int handle_io(struct kvm_vcpu *vcpu) 4575 { 4576 unsigned long exit_qualification; 4577 int size, in, string; 4578 unsigned port; 4579 4580 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4581 string = (exit_qualification & 16) != 0; 4582 4583 ++vcpu->stat.io_exits; 4584 4585 if (string) 4586 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; 4587 4588 port = exit_qualification >> 16; 4589 size = (exit_qualification & 7) + 1; 4590 in = (exit_qualification & 8) != 0; 4591 4592 return kvm_fast_pio(vcpu, size, port, in); 4593 } 4594 4595 static void 4596 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) 4597 { 4598 /* 4599 * Patch in the VMCALL instruction: 4600 */ 4601 hypercall[0] = 0x0f; 4602 hypercall[1] = 0x01; 4603 hypercall[2] = 0xc1; 4604 } 4605 4606 /* called to set cr0 as appropriate for a mov-to-cr0 exit. 
*/ 4607 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) 4608 { 4609 if (is_guest_mode(vcpu)) { 4610 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4611 unsigned long orig_val = val; 4612 4613 /* 4614 * We get here when L2 changed cr0 in a way that did not change 4615 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), 4616 * but did change L0 shadowed bits. So we first calculate the 4617 * effective cr0 value that L1 would like to write into the 4618 * hardware. It consists of the L2-owned bits from the new 4619 * value combined with the L1-owned bits from L1's guest_cr0. 4620 */ 4621 val = (val & ~vmcs12->cr0_guest_host_mask) | 4622 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); 4623 4624 if (!nested_guest_cr0_valid(vcpu, val)) 4625 return 1; 4626 4627 if (kvm_set_cr0(vcpu, val)) 4628 return 1; 4629 vmcs_writel(CR0_READ_SHADOW, orig_val); 4630 return 0; 4631 } else { 4632 if (to_vmx(vcpu)->nested.vmxon && 4633 !nested_host_cr0_valid(vcpu, val)) 4634 return 1; 4635 4636 return kvm_set_cr0(vcpu, val); 4637 } 4638 } 4639 4640 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) 4641 { 4642 if (is_guest_mode(vcpu)) { 4643 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4644 unsigned long orig_val = val; 4645 4646 /* analogously to handle_set_cr0 */ 4647 val = (val & ~vmcs12->cr4_guest_host_mask) | 4648 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); 4649 if (kvm_set_cr4(vcpu, val)) 4650 return 1; 4651 vmcs_writel(CR4_READ_SHADOW, orig_val); 4652 return 0; 4653 } else 4654 return kvm_set_cr4(vcpu, val); 4655 } 4656 4657 static int handle_desc(struct kvm_vcpu *vcpu) 4658 { 4659 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); 4660 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; 4661 } 4662 4663 static int handle_cr(struct kvm_vcpu *vcpu) 4664 { 4665 unsigned long exit_qualification, val; 4666 int cr; 4667 int reg; 4668 int err; 4669 int ret; 4670 4671 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4672 cr = exit_qualification & 15; 4673 reg = (exit_qualification >> 8) & 15; 4674 switch ((exit_qualification >> 4) & 3) { 4675 case 0: /* mov to cr */ 4676 val = kvm_register_readl(vcpu, reg); 4677 trace_kvm_cr_write(cr, val); 4678 switch (cr) { 4679 case 0: 4680 err = handle_set_cr0(vcpu, val); 4681 return kvm_complete_insn_gp(vcpu, err); 4682 case 3: 4683 WARN_ON_ONCE(enable_unrestricted_guest); 4684 err = kvm_set_cr3(vcpu, val); 4685 return kvm_complete_insn_gp(vcpu, err); 4686 case 4: 4687 err = handle_set_cr4(vcpu, val); 4688 return kvm_complete_insn_gp(vcpu, err); 4689 case 8: { 4690 u8 cr8_prev = kvm_get_cr8(vcpu); 4691 u8 cr8 = (u8)val; 4692 err = kvm_set_cr8(vcpu, cr8); 4693 ret = kvm_complete_insn_gp(vcpu, err); 4694 if (lapic_in_kernel(vcpu)) 4695 return ret; 4696 if (cr8_prev <= cr8) 4697 return ret; 4698 /* 4699 * TODO: we might be squashing a 4700 * KVM_GUESTDBG_SINGLESTEP-triggered 4701 * KVM_EXIT_DEBUG here. 
4702 */ 4703 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; 4704 return 0; 4705 } 4706 } 4707 break; 4708 case 2: /* clts */ 4709 WARN_ONCE(1, "Guest should always own CR0.TS"); 4710 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); 4711 trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); 4712 return kvm_skip_emulated_instruction(vcpu); 4713 case 1: /*mov from cr*/ 4714 switch (cr) { 4715 case 3: 4716 WARN_ON_ONCE(enable_unrestricted_guest); 4717 val = kvm_read_cr3(vcpu); 4718 kvm_register_write(vcpu, reg, val); 4719 trace_kvm_cr_read(cr, val); 4720 return kvm_skip_emulated_instruction(vcpu); 4721 case 8: 4722 val = kvm_get_cr8(vcpu); 4723 kvm_register_write(vcpu, reg, val); 4724 trace_kvm_cr_read(cr, val); 4725 return kvm_skip_emulated_instruction(vcpu); 4726 } 4727 break; 4728 case 3: /* lmsw */ 4729 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 4730 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); 4731 kvm_lmsw(vcpu, val); 4732 4733 return kvm_skip_emulated_instruction(vcpu); 4734 default: 4735 break; 4736 } 4737 vcpu->run->exit_reason = 0; 4738 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", 4739 (int)(exit_qualification >> 4) & 3, cr); 4740 return 0; 4741 } 4742 4743 static int handle_dr(struct kvm_vcpu *vcpu) 4744 { 4745 unsigned long exit_qualification; 4746 int dr, dr7, reg; 4747 4748 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4749 dr = exit_qualification & DEBUG_REG_ACCESS_NUM; 4750 4751 /* First, if DR does not exist, trigger UD */ 4752 if (!kvm_require_dr(vcpu, dr)) 4753 return 1; 4754 4755 /* Do not handle if the CPL > 0, will trigger GP on re-entry */ 4756 if (!kvm_require_cpl(vcpu, 0)) 4757 return 1; 4758 dr7 = vmcs_readl(GUEST_DR7); 4759 if (dr7 & DR7_GD) { 4760 /* 4761 * As the vm-exit takes precedence over the debug trap, we 4762 * need to emulate the latter, either for the host or the 4763 * guest debugging itself. 4764 */ 4765 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { 4766 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; 4767 vcpu->run->debug.arch.dr7 = dr7; 4768 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); 4769 vcpu->run->debug.arch.exception = DB_VECTOR; 4770 vcpu->run->exit_reason = KVM_EXIT_DEBUG; 4771 return 0; 4772 } else { 4773 vcpu->arch.dr6 &= ~15; 4774 vcpu->arch.dr6 |= DR6_BD | DR6_RTM; 4775 kvm_queue_exception(vcpu, DB_VECTOR); 4776 return 1; 4777 } 4778 } 4779 4780 if (vcpu->guest_debug == 0) { 4781 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, 4782 CPU_BASED_MOV_DR_EXITING); 4783 4784 /* 4785 * No more DR vmexits; force a reload of the debug registers 4786 * and reenter on this instruction. The next vmexit will 4787 * retrieve the full state of the debug registers. 
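 * KVM_DEBUGREG_WONT_EXIT set below is undone in
 * vmx_sync_dirty_debug_regs(), which reads the debug registers back and
 * re-enables MOV DR exiting.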
4788 */ 4789 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; 4790 return 1; 4791 } 4792 4793 reg = DEBUG_REG_ACCESS_REG(exit_qualification); 4794 if (exit_qualification & TYPE_MOV_FROM_DR) { 4795 unsigned long val; 4796 4797 if (kvm_get_dr(vcpu, dr, &val)) 4798 return 1; 4799 kvm_register_write(vcpu, reg, val); 4800 } else 4801 if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) 4802 return 1; 4803 4804 return kvm_skip_emulated_instruction(vcpu); 4805 } 4806 4807 static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) 4808 { 4809 return vcpu->arch.dr6; 4810 } 4811 4812 static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) 4813 { 4814 } 4815 4816 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) 4817 { 4818 get_debugreg(vcpu->arch.db[0], 0); 4819 get_debugreg(vcpu->arch.db[1], 1); 4820 get_debugreg(vcpu->arch.db[2], 2); 4821 get_debugreg(vcpu->arch.db[3], 3); 4822 get_debugreg(vcpu->arch.dr6, 6); 4823 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); 4824 4825 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; 4826 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING); 4827 } 4828 4829 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) 4830 { 4831 vmcs_writel(GUEST_DR7, val); 4832 } 4833 4834 static int handle_cpuid(struct kvm_vcpu *vcpu) 4835 { 4836 return kvm_emulate_cpuid(vcpu); 4837 } 4838 4839 static int handle_rdmsr(struct kvm_vcpu *vcpu) 4840 { 4841 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; 4842 struct msr_data msr_info; 4843 4844 msr_info.index = ecx; 4845 msr_info.host_initiated = false; 4846 if (vmx_get_msr(vcpu, &msr_info)) { 4847 trace_kvm_msr_read_ex(ecx); 4848 kvm_inject_gp(vcpu, 0); 4849 return 1; 4850 } 4851 4852 trace_kvm_msr_read(ecx, msr_info.data); 4853 4854 /* FIXME: handling of bits 32:63 of rax, rdx */ 4855 vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; 4856 vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u; 4857 return kvm_skip_emulated_instruction(vcpu); 4858 } 4859 4860 static int handle_wrmsr(struct kvm_vcpu *vcpu) 4861 { 4862 struct msr_data msr; 4863 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; 4864 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) 4865 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); 4866 4867 msr.data = data; 4868 msr.index = ecx; 4869 msr.host_initiated = false; 4870 if (kvm_set_msr(vcpu, &msr) != 0) { 4871 trace_kvm_msr_write_ex(ecx, data); 4872 kvm_inject_gp(vcpu, 0); 4873 return 1; 4874 } 4875 4876 trace_kvm_msr_write(ecx, data); 4877 return kvm_skip_emulated_instruction(vcpu); 4878 } 4879 4880 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) 4881 { 4882 kvm_apic_update_ppr(vcpu); 4883 return 1; 4884 } 4885 4886 static int handle_interrupt_window(struct kvm_vcpu *vcpu) 4887 { 4888 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, 4889 CPU_BASED_VIRTUAL_INTR_PENDING); 4890 4891 kvm_make_request(KVM_REQ_EVENT, vcpu); 4892 4893 ++vcpu->stat.irq_window_exits; 4894 return 1; 4895 } 4896 4897 static int handle_halt(struct kvm_vcpu *vcpu) 4898 { 4899 return kvm_emulate_halt(vcpu); 4900 } 4901 4902 static int handle_vmcall(struct kvm_vcpu *vcpu) 4903 { 4904 return kvm_emulate_hypercall(vcpu); 4905 } 4906 4907 static int handle_invd(struct kvm_vcpu *vcpu) 4908 { 4909 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; 4910 } 4911 4912 static int handle_invlpg(struct kvm_vcpu *vcpu) 4913 { 4914 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4915 4916 kvm_mmu_invlpg(vcpu, exit_qualification); 4917 return kvm_skip_emulated_instruction(vcpu); 4918 } 4919 4920 
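/*
 * RDPMC exits are forwarded to the generic PMU emulation.  kvm_rdpmc()
 * is expected to fail on an invalid counter index, in which case
 * kvm_complete_insn_gp() injects #GP instead of skipping the instruction.
 */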
static int handle_rdpmc(struct kvm_vcpu *vcpu) 4921 { 4922 int err; 4923 4924 err = kvm_rdpmc(vcpu); 4925 return kvm_complete_insn_gp(vcpu, err); 4926 } 4927 4928 static int handle_wbinvd(struct kvm_vcpu *vcpu) 4929 { 4930 return kvm_emulate_wbinvd(vcpu); 4931 } 4932 4933 static int handle_xsetbv(struct kvm_vcpu *vcpu) 4934 { 4935 u64 new_bv = kvm_read_edx_eax(vcpu); 4936 u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); 4937 4938 if (kvm_set_xcr(vcpu, index, new_bv) == 0) 4939 return kvm_skip_emulated_instruction(vcpu); 4940 return 1; 4941 } 4942 4943 static int handle_xsaves(struct kvm_vcpu *vcpu) 4944 { 4945 kvm_skip_emulated_instruction(vcpu); 4946 WARN(1, "this should never happen\n"); 4947 return 1; 4948 } 4949 4950 static int handle_xrstors(struct kvm_vcpu *vcpu) 4951 { 4952 kvm_skip_emulated_instruction(vcpu); 4953 WARN(1, "this should never happen\n"); 4954 return 1; 4955 } 4956 4957 static int handle_apic_access(struct kvm_vcpu *vcpu) 4958 { 4959 if (likely(fasteoi)) { 4960 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4961 int access_type, offset; 4962 4963 access_type = exit_qualification & APIC_ACCESS_TYPE; 4964 offset = exit_qualification & APIC_ACCESS_OFFSET; 4965 /* 4966 * Sane guest uses MOV to write EOI, with written value 4967 * not cared. So make a short-circuit here by avoiding 4968 * heavy instruction emulation. 4969 */ 4970 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && 4971 (offset == APIC_EOI)) { 4972 kvm_lapic_set_eoi(vcpu); 4973 return kvm_skip_emulated_instruction(vcpu); 4974 } 4975 } 4976 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; 4977 } 4978 4979 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) 4980 { 4981 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4982 int vector = exit_qualification & 0xff; 4983 4984 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ 4985 kvm_apic_set_eoi_accelerated(vcpu, vector); 4986 return 1; 4987 } 4988 4989 static int handle_apic_write(struct kvm_vcpu *vcpu) 4990 { 4991 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4992 u32 offset = exit_qualification & 0xfff; 4993 4994 /* APIC-write VM exit is trap-like and thus no need to adjust IP */ 4995 kvm_apic_write_nodecode(vcpu, offset); 4996 return 1; 4997 } 4998 4999 static int handle_task_switch(struct kvm_vcpu *vcpu) 5000 { 5001 struct vcpu_vmx *vmx = to_vmx(vcpu); 5002 unsigned long exit_qualification; 5003 bool has_error_code = false; 5004 u32 error_code = 0; 5005 u16 tss_selector; 5006 int reason, type, idt_v, idt_index; 5007 5008 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); 5009 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); 5010 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); 5011 5012 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5013 5014 reason = (u32)exit_qualification >> 30; 5015 if (reason == TASK_SWITCH_GATE && idt_v) { 5016 switch (type) { 5017 case INTR_TYPE_NMI_INTR: 5018 vcpu->arch.nmi_injected = false; 5019 vmx_set_nmi_mask(vcpu, true); 5020 break; 5021 case INTR_TYPE_EXT_INTR: 5022 case INTR_TYPE_SOFT_INTR: 5023 kvm_clear_interrupt_queue(vcpu); 5024 break; 5025 case INTR_TYPE_HARD_EXCEPTION: 5026 if (vmx->idt_vectoring_info & 5027 VECTORING_INFO_DELIVER_CODE_MASK) { 5028 has_error_code = true; 5029 error_code = 5030 vmcs_read32(IDT_VECTORING_ERROR_CODE); 5031 } 5032 /* fall through */ 5033 case INTR_TYPE_SOFT_EXCEPTION: 5034 kvm_clear_exception_queue(vcpu); 5035 break; 5036 default: 5037 break; 5038 } 
5039 } 5040 tss_selector = exit_qualification; 5041 5042 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && 5043 type != INTR_TYPE_EXT_INTR && 5044 type != INTR_TYPE_NMI_INTR)) 5045 skip_emulated_instruction(vcpu); 5046 5047 if (kvm_task_switch(vcpu, tss_selector, 5048 type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason, 5049 has_error_code, error_code) == EMULATE_FAIL) { 5050 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 5051 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 5052 vcpu->run->internal.ndata = 0; 5053 return 0; 5054 } 5055 5056 /* 5057 * TODO: What about debug traps on tss switch? 5058 * Are we supposed to inject them and update dr6? 5059 */ 5060 5061 return 1; 5062 } 5063 5064 static int handle_ept_violation(struct kvm_vcpu *vcpu) 5065 { 5066 unsigned long exit_qualification; 5067 gpa_t gpa; 5068 u64 error_code; 5069 5070 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5071 5072 /* 5073 * EPT violation happened while executing iret from NMI, 5074 * "blocked by NMI" bit has to be set before next VM entry. 5075 * There are errata that may cause this bit to not be set: 5076 * AAK134, BY25. 5077 */ 5078 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 5079 enable_vnmi && 5080 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 5081 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); 5082 5083 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5084 trace_kvm_page_fault(gpa, exit_qualification); 5085 5086 /* Is it a read fault? */ 5087 error_code = (exit_qualification & EPT_VIOLATION_ACC_READ) 5088 ? PFERR_USER_MASK : 0; 5089 /* Is it a write fault? */ 5090 error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE) 5091 ? PFERR_WRITE_MASK : 0; 5092 /* Is it a fetch fault? */ 5093 error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR) 5094 ? PFERR_FETCH_MASK : 0; 5095 /* ept page table entry is present? */ 5096 error_code |= (exit_qualification & 5097 (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE | 5098 EPT_VIOLATION_EXECUTABLE)) 5099 ? PFERR_PRESENT_MASK : 0; 5100 5101 error_code |= (exit_qualification & 0x100) != 0 ? 5102 PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; 5103 5104 vcpu->arch.exit_qualification = exit_qualification; 5105 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); 5106 } 5107 5108 static int handle_ept_misconfig(struct kvm_vcpu *vcpu) 5109 { 5110 gpa_t gpa; 5111 5112 /* 5113 * A nested guest cannot optimize MMIO vmexits, because we have an 5114 * nGPA here instead of the required GPA. 5115 */ 5116 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5117 if (!is_guest_mode(vcpu) && 5118 !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { 5119 trace_kvm_fast_mmio(gpa); 5120 /* 5121 * Doing kvm_skip_emulated_instruction() depends on undefined 5122 * behavior: Intel's manual doesn't mandate 5123 * VM_EXIT_INSTRUCTION_LEN to be set in VMCS when EPT MISCONFIG 5124 * occurs and while on real hardware it was observed to be set, 5125 * other hypervisors (namely Hyper-V) don't set it, we end up 5126 * advancing IP with some random value. Disable fast mmio when 5127 * running nested and keep it for real hardware in hope that 5128 * VM_EXIT_INSTRUCTION_LEN will always be set correctly. 
5129 */ 5130 if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) 5131 return kvm_skip_emulated_instruction(vcpu); 5132 else 5133 return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) == 5134 EMULATE_DONE; 5135 } 5136 5137 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); 5138 } 5139 5140 static int handle_nmi_window(struct kvm_vcpu *vcpu) 5141 { 5142 WARN_ON_ONCE(!enable_vnmi); 5143 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, 5144 CPU_BASED_VIRTUAL_NMI_PENDING); 5145 ++vcpu->stat.nmi_window_exits; 5146 kvm_make_request(KVM_REQ_EVENT, vcpu); 5147 5148 return 1; 5149 } 5150 5151 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) 5152 { 5153 struct vcpu_vmx *vmx = to_vmx(vcpu); 5154 enum emulation_result err = EMULATE_DONE; 5155 int ret = 1; 5156 u32 cpu_exec_ctrl; 5157 bool intr_window_requested; 5158 unsigned count = 130; 5159 5160 /* 5161 * We should never reach the point where we are emulating L2 5162 * due to invalid guest state as that means we incorrectly 5163 * allowed a nested VMEntry with an invalid vmcs12. 5164 */ 5165 WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending); 5166 5167 cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 5168 intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; 5169 5170 while (vmx->emulation_required && count-- != 0) { 5171 if (intr_window_requested && vmx_interrupt_allowed(vcpu)) 5172 return handle_interrupt_window(&vmx->vcpu); 5173 5174 if (kvm_test_request(KVM_REQ_EVENT, vcpu)) 5175 return 1; 5176 5177 err = kvm_emulate_instruction(vcpu, 0); 5178 5179 if (err == EMULATE_USER_EXIT) { 5180 ++vcpu->stat.mmio_exits; 5181 ret = 0; 5182 goto out; 5183 } 5184 5185 if (err != EMULATE_DONE) 5186 goto emulation_error; 5187 5188 if (vmx->emulation_required && !vmx->rmode.vm86_active && 5189 vcpu->arch.exception.pending) 5190 goto emulation_error; 5191 5192 if (vcpu->arch.halt_request) { 5193 vcpu->arch.halt_request = 0; 5194 ret = kvm_vcpu_halt(vcpu); 5195 goto out; 5196 } 5197 5198 if (signal_pending(current)) 5199 goto out; 5200 if (need_resched()) 5201 schedule(); 5202 } 5203 5204 out: 5205 return ret; 5206 5207 emulation_error: 5208 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 5209 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 5210 vcpu->run->internal.ndata = 0; 5211 return 0; 5212 } 5213 5214 static void grow_ple_window(struct kvm_vcpu *vcpu) 5215 { 5216 struct vcpu_vmx *vmx = to_vmx(vcpu); 5217 int old = vmx->ple_window; 5218 5219 vmx->ple_window = __grow_ple_window(old, ple_window, 5220 ple_window_grow, 5221 ple_window_max); 5222 5223 if (vmx->ple_window != old) 5224 vmx->ple_window_dirty = true; 5225 5226 trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); 5227 } 5228 5229 static void shrink_ple_window(struct kvm_vcpu *vcpu) 5230 { 5231 struct vcpu_vmx *vmx = to_vmx(vcpu); 5232 int old = vmx->ple_window; 5233 5234 vmx->ple_window = __shrink_ple_window(old, ple_window, 5235 ple_window_shrink, 5236 ple_window); 5237 5238 if (vmx->ple_window != old) 5239 vmx->ple_window_dirty = true; 5240 5241 trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); 5242 } 5243 5244 /* 5245 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR. 
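 * A vCPU that blocks with posted interrupts enabled has its notification
 * vector switched to the wakeup vector and is queued on the per-CPU
 * blocked_vcpu_on_cpu list (see pi_pre_block()); this handler walks that
 * list and kicks every vCPU whose posted-interrupt descriptor has the ON
 * bit set.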
5246 */ 5247 static void wakeup_handler(void) 5248 { 5249 struct kvm_vcpu *vcpu; 5250 int cpu = smp_processor_id(); 5251 5252 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); 5253 list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), 5254 blocked_vcpu_list) { 5255 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 5256 5257 if (pi_test_on(pi_desc) == 1) 5258 kvm_vcpu_kick(vcpu); 5259 } 5260 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); 5261 } 5262 5263 static void vmx_enable_tdp(void) 5264 { 5265 kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK, 5266 enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull, 5267 enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull, 5268 0ull, VMX_EPT_EXECUTABLE_MASK, 5269 cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK, 5270 VMX_EPT_RWX_MASK, 0ull); 5271 5272 ept_set_mmio_spte_mask(); 5273 kvm_enable_tdp(); 5274 } 5275 5276 /* 5277 * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE 5278 * exiting, so only get here on cpu with PAUSE-Loop-Exiting. 5279 */ 5280 static int handle_pause(struct kvm_vcpu *vcpu) 5281 { 5282 if (!kvm_pause_in_guest(vcpu->kvm)) 5283 grow_ple_window(vcpu); 5284 5285 /* 5286 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting" 5287 * VM-execution control is ignored if CPL > 0. OTOH, KVM 5288 * never set PAUSE_EXITING and just set PLE if supported, 5289 * so the vcpu must be CPL=0 if it gets a PAUSE exit. 5290 */ 5291 kvm_vcpu_on_spin(vcpu, true); 5292 return kvm_skip_emulated_instruction(vcpu); 5293 } 5294 5295 static int handle_nop(struct kvm_vcpu *vcpu) 5296 { 5297 return kvm_skip_emulated_instruction(vcpu); 5298 } 5299 5300 static int handle_mwait(struct kvm_vcpu *vcpu) 5301 { 5302 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); 5303 return handle_nop(vcpu); 5304 } 5305 5306 static int handle_invalid_op(struct kvm_vcpu *vcpu) 5307 { 5308 kvm_queue_exception(vcpu, UD_VECTOR); 5309 return 1; 5310 } 5311 5312 static int handle_monitor_trap(struct kvm_vcpu *vcpu) 5313 { 5314 return 1; 5315 } 5316 5317 static int handle_monitor(struct kvm_vcpu *vcpu) 5318 { 5319 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); 5320 return handle_nop(vcpu); 5321 } 5322 5323 static int handle_invpcid(struct kvm_vcpu *vcpu) 5324 { 5325 u32 vmx_instruction_info; 5326 unsigned long type; 5327 bool pcid_enabled; 5328 gva_t gva; 5329 struct x86_exception e; 5330 unsigned i; 5331 unsigned long roots_to_free = 0; 5332 struct { 5333 u64 pcid; 5334 u64 gla; 5335 } operand; 5336 5337 if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { 5338 kvm_queue_exception(vcpu, UD_VECTOR); 5339 return 1; 5340 } 5341 5342 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5343 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 5344 5345 if (type > 3) { 5346 kvm_inject_gp(vcpu, 0); 5347 return 1; 5348 } 5349 5350 /* According to the Intel instruction reference, the memory operand 5351 * is read even if it isn't needed (e.g., for type==all) 5352 */ 5353 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 5354 vmx_instruction_info, false, &gva)) 5355 return 1; 5356 5357 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { 5358 kvm_inject_page_fault(vcpu, &e); 5359 return 1; 5360 } 5361 5362 if (operand.pcid >> 12 != 0) { 5363 kvm_inject_gp(vcpu, 0); 5364 return 1; 5365 } 5366 5367 pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); 5368 5369 switch (type) { 5370 case INVPCID_TYPE_INDIV_ADDR: 5371 if ((!pcid_enabled && (operand.pcid != 0)) || 5372 
is_noncanonical_address(operand.gla, vcpu)) { 5373 kvm_inject_gp(vcpu, 0); 5374 return 1; 5375 } 5376 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); 5377 return kvm_skip_emulated_instruction(vcpu); 5378 5379 case INVPCID_TYPE_SINGLE_CTXT: 5380 if (!pcid_enabled && (operand.pcid != 0)) { 5381 kvm_inject_gp(vcpu, 0); 5382 return 1; 5383 } 5384 5385 if (kvm_get_active_pcid(vcpu) == operand.pcid) { 5386 kvm_mmu_sync_roots(vcpu); 5387 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 5388 } 5389 5390 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) 5391 if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3) 5392 == operand.pcid) 5393 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 5394 5395 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); 5396 /* 5397 * If neither the current cr3 nor any of the prev_roots use the 5398 * given PCID, then nothing needs to be done here because a 5399 * resync will happen anyway before switching to any other CR3. 5400 */ 5401 5402 return kvm_skip_emulated_instruction(vcpu); 5403 5404 case INVPCID_TYPE_ALL_NON_GLOBAL: 5405 /* 5406 * Currently, KVM doesn't mark global entries in the shadow 5407 * page tables, so a non-global flush just degenerates to a 5408 * global flush. If needed, we could optimize this later by 5409 * keeping track of global entries in shadow page tables. 5410 */ 5411 5412 /* fall-through */ 5413 case INVPCID_TYPE_ALL_INCL_GLOBAL: 5414 kvm_mmu_unload(vcpu); 5415 return kvm_skip_emulated_instruction(vcpu); 5416 5417 default: 5418 BUG(); /* We have already checked above that type <= 3 */ 5419 } 5420 } 5421 5422 static int handle_pml_full(struct kvm_vcpu *vcpu) 5423 { 5424 unsigned long exit_qualification; 5425 5426 trace_kvm_pml_full(vcpu->vcpu_id); 5427 5428 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5429 5430 /* 5431 * PML buffer FULL happened while executing iret from NMI, 5432 * "blocked by NMI" bit has to be set before next VM entry. 5433 */ 5434 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && 5435 enable_vnmi && 5436 (exit_qualification & INTR_INFO_UNBLOCK_NMI)) 5437 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 5438 GUEST_INTR_STATE_NMI); 5439 5440 /* 5441 * PML buffer already flushed at beginning of VMEXIT. Nothing to do 5442 * here.., and there's no userspace involvement needed for PML. 5443 */ 5444 return 1; 5445 } 5446 5447 static int handle_preemption_timer(struct kvm_vcpu *vcpu) 5448 { 5449 if (!to_vmx(vcpu)->req_immediate_exit) 5450 kvm_lapic_expired_hv_timer(vcpu); 5451 return 1; 5452 } 5453 5454 /* 5455 * When nested=0, all VMX instruction VM Exits filter here. The handlers 5456 * are overwritten by nested_vmx_setup() when nested=1. 5457 */ 5458 static int handle_vmx_instruction(struct kvm_vcpu *vcpu) 5459 { 5460 kvm_queue_exception(vcpu, UD_VECTOR); 5461 return 1; 5462 } 5463 5464 static int handle_encls(struct kvm_vcpu *vcpu) 5465 { 5466 /* 5467 * SGX virtualization is not yet supported. There is no software 5468 * enable bit for SGX, so we have to trap ENCLS and inject a #UD 5469 * to prevent the guest from executing ENCLS. 5470 */ 5471 kvm_queue_exception(vcpu, UD_VECTOR); 5472 return 1; 5473 } 5474 5475 /* 5476 * The exit handlers return 1 if the exit was handled fully and guest execution 5477 * may resume. Otherwise they set the kvm_run parameter to indicate what needs 5478 * to be done to userspace and return 0. 
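 * The table is indexed by the basic exit reason taken from VM_EXIT_REASON;
 * exit reasons without an entry here are caught by the range/NULL check in
 * vmx_handle_exit().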
5479 */ 5480 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { 5481 [EXIT_REASON_EXCEPTION_NMI] = handle_exception, 5482 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, 5483 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, 5484 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, 5485 [EXIT_REASON_IO_INSTRUCTION] = handle_io, 5486 [EXIT_REASON_CR_ACCESS] = handle_cr, 5487 [EXIT_REASON_DR_ACCESS] = handle_dr, 5488 [EXIT_REASON_CPUID] = handle_cpuid, 5489 [EXIT_REASON_MSR_READ] = handle_rdmsr, 5490 [EXIT_REASON_MSR_WRITE] = handle_wrmsr, 5491 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, 5492 [EXIT_REASON_HLT] = handle_halt, 5493 [EXIT_REASON_INVD] = handle_invd, 5494 [EXIT_REASON_INVLPG] = handle_invlpg, 5495 [EXIT_REASON_RDPMC] = handle_rdpmc, 5496 [EXIT_REASON_VMCALL] = handle_vmcall, 5497 [EXIT_REASON_VMCLEAR] = handle_vmx_instruction, 5498 [EXIT_REASON_VMLAUNCH] = handle_vmx_instruction, 5499 [EXIT_REASON_VMPTRLD] = handle_vmx_instruction, 5500 [EXIT_REASON_VMPTRST] = handle_vmx_instruction, 5501 [EXIT_REASON_VMREAD] = handle_vmx_instruction, 5502 [EXIT_REASON_VMRESUME] = handle_vmx_instruction, 5503 [EXIT_REASON_VMWRITE] = handle_vmx_instruction, 5504 [EXIT_REASON_VMOFF] = handle_vmx_instruction, 5505 [EXIT_REASON_VMON] = handle_vmx_instruction, 5506 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, 5507 [EXIT_REASON_APIC_ACCESS] = handle_apic_access, 5508 [EXIT_REASON_APIC_WRITE] = handle_apic_write, 5509 [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, 5510 [EXIT_REASON_WBINVD] = handle_wbinvd, 5511 [EXIT_REASON_XSETBV] = handle_xsetbv, 5512 [EXIT_REASON_TASK_SWITCH] = handle_task_switch, 5513 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, 5514 [EXIT_REASON_GDTR_IDTR] = handle_desc, 5515 [EXIT_REASON_LDTR_TR] = handle_desc, 5516 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, 5517 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, 5518 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, 5519 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, 5520 [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, 5521 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, 5522 [EXIT_REASON_INVEPT] = handle_vmx_instruction, 5523 [EXIT_REASON_INVVPID] = handle_vmx_instruction, 5524 [EXIT_REASON_RDRAND] = handle_invalid_op, 5525 [EXIT_REASON_RDSEED] = handle_invalid_op, 5526 [EXIT_REASON_XSAVES] = handle_xsaves, 5527 [EXIT_REASON_XRSTORS] = handle_xrstors, 5528 [EXIT_REASON_PML_FULL] = handle_pml_full, 5529 [EXIT_REASON_INVPCID] = handle_invpcid, 5530 [EXIT_REASON_VMFUNC] = handle_vmx_instruction, 5531 [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, 5532 [EXIT_REASON_ENCLS] = handle_encls, 5533 }; 5534 5535 static const int kvm_vmx_max_exit_handlers = 5536 ARRAY_SIZE(kvm_vmx_exit_handlers); 5537 5538 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) 5539 { 5540 *info1 = vmcs_readl(EXIT_QUALIFICATION); 5541 *info2 = vmcs_read32(VM_EXIT_INTR_INFO); 5542 } 5543 5544 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) 5545 { 5546 if (vmx->pml_pg) { 5547 __free_page(vmx->pml_pg); 5548 vmx->pml_pg = NULL; 5549 } 5550 } 5551 5552 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) 5553 { 5554 struct vcpu_vmx *vmx = to_vmx(vcpu); 5555 u64 *pml_buf; 5556 u16 pml_idx; 5557 5558 pml_idx = vmcs_read16(GUEST_PML_INDEX); 5559 5560 /* Do nothing if PML buffer is empty */ 5561 if (pml_idx == (PML_ENTITY_NUM - 1)) 5562 return; 5563 5564 /* PML index always points to next available PML buffer entity */ 5565 if 
(pml_idx >= PML_ENTITY_NUM) 5566 pml_idx = 0; 5567 else 5568 pml_idx++; 5569 5570 pml_buf = page_address(vmx->pml_pg); 5571 for (; pml_idx < PML_ENTITY_NUM; pml_idx++) { 5572 u64 gpa; 5573 5574 gpa = pml_buf[pml_idx]; 5575 WARN_ON(gpa & (PAGE_SIZE - 1)); 5576 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); 5577 } 5578 5579 /* reset PML index */ 5580 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 5581 } 5582 5583 /* 5584 * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap. 5585 * Called before reporting dirty_bitmap to userspace. 5586 */ 5587 static void kvm_flush_pml_buffers(struct kvm *kvm) 5588 { 5589 int i; 5590 struct kvm_vcpu *vcpu; 5591 /* 5592 * We only need to kick vcpu out of guest mode here, as PML buffer 5593 * is flushed at beginning of all VMEXITs, and it's obvious that only 5594 * vcpus running in guest are possible to have unflushed GPAs in PML 5595 * buffer. 5596 */ 5597 kvm_for_each_vcpu(i, vcpu, kvm) 5598 kvm_vcpu_kick(vcpu); 5599 } 5600 5601 static void vmx_dump_sel(char *name, uint32_t sel) 5602 { 5603 pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", 5604 name, vmcs_read16(sel), 5605 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), 5606 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), 5607 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); 5608 } 5609 5610 static void vmx_dump_dtsel(char *name, uint32_t limit) 5611 { 5612 pr_err("%s limit=0x%08x, base=0x%016lx\n", 5613 name, vmcs_read32(limit), 5614 vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); 5615 } 5616 5617 static void dump_vmcs(void) 5618 { 5619 u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); 5620 u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); 5621 u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 5622 u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); 5623 u32 secondary_exec_control = 0; 5624 unsigned long cr4 = vmcs_readl(GUEST_CR4); 5625 u64 efer = vmcs_read64(GUEST_IA32_EFER); 5626 int i, n; 5627 5628 if (cpu_has_secondary_exec_ctrls()) 5629 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); 5630 5631 pr_err("*** Guest State ***\n"); 5632 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 5633 vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), 5634 vmcs_readl(CR0_GUEST_HOST_MASK)); 5635 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", 5636 cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); 5637 pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); 5638 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) && 5639 (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA)) 5640 { 5641 pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", 5642 vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1)); 5643 pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", 5644 vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3)); 5645 } 5646 pr_err("RSP = 0x%016lx RIP = 0x%016lx\n", 5647 vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP)); 5648 pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", 5649 vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7)); 5650 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 5651 vmcs_readl(GUEST_SYSENTER_ESP), 5652 vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); 5653 vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); 5654 vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); 5655 vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); 5656 vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); 5657 vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); 5658 vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); 5659 
vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); 5660 vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); 5661 vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); 5662 vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); 5663 if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) || 5664 (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER))) 5665 pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", 5666 efer, vmcs_read64(GUEST_IA32_PAT)); 5667 pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", 5668 vmcs_read64(GUEST_IA32_DEBUGCTL), 5669 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); 5670 if (cpu_has_load_perf_global_ctrl() && 5671 vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) 5672 pr_err("PerfGlobCtl = 0x%016llx\n", 5673 vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); 5674 if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) 5675 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); 5676 pr_err("Interruptibility = %08x ActivityState = %08x\n", 5677 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), 5678 vmcs_read32(GUEST_ACTIVITY_STATE)); 5679 if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 5680 pr_err("InterruptStatus = %04x\n", 5681 vmcs_read16(GUEST_INTR_STATUS)); 5682 5683 pr_err("*** Host State ***\n"); 5684 pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", 5685 vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); 5686 pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", 5687 vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), 5688 vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), 5689 vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), 5690 vmcs_read16(HOST_TR_SELECTOR)); 5691 pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", 5692 vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), 5693 vmcs_readl(HOST_TR_BASE)); 5694 pr_err("GDTBase=%016lx IDTBase=%016lx\n", 5695 vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); 5696 pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", 5697 vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), 5698 vmcs_readl(HOST_CR4)); 5699 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", 5700 vmcs_readl(HOST_IA32_SYSENTER_ESP), 5701 vmcs_read32(HOST_IA32_SYSENTER_CS), 5702 vmcs_readl(HOST_IA32_SYSENTER_EIP)); 5703 if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER)) 5704 pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", 5705 vmcs_read64(HOST_IA32_EFER), 5706 vmcs_read64(HOST_IA32_PAT)); 5707 if (cpu_has_load_perf_global_ctrl() && 5708 vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 5709 pr_err("PerfGlobCtl = 0x%016llx\n", 5710 vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); 5711 5712 pr_err("*** Control State ***\n"); 5713 pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", 5714 pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); 5715 pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); 5716 pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", 5717 vmcs_read32(EXCEPTION_BITMAP), 5718 vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), 5719 vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); 5720 pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", 5721 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 5722 vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), 5723 vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); 5724 pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", 5725 vmcs_read32(VM_EXIT_INTR_INFO), 5726 vmcs_read32(VM_EXIT_INTR_ERROR_CODE), 5727 vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); 5728 pr_err(" reason=%08x qualification=%016lx\n", 5729 vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION)); 5730 
pr_err("IDTVectoring: info=%08x errcode=%08x\n", 5731 vmcs_read32(IDT_VECTORING_INFO_FIELD), 5732 vmcs_read32(IDT_VECTORING_ERROR_CODE)); 5733 pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET)); 5734 if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING) 5735 pr_err("TSC Multiplier = 0x%016llx\n", 5736 vmcs_read64(TSC_MULTIPLIER)); 5737 if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) 5738 pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD)); 5739 if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR) 5740 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV)); 5741 if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)) 5742 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER)); 5743 n = vmcs_read32(CR3_TARGET_COUNT); 5744 for (i = 0; i + 1 < n; i += 4) 5745 pr_err("CR3 target%u=%016lx target%u=%016lx\n", 5746 i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2), 5747 i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2)); 5748 if (i < n) 5749 pr_err("CR3 target%u=%016lx\n", 5750 i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2)); 5751 if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) 5752 pr_err("PLE Gap=%08x Window=%08x\n", 5753 vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW)); 5754 if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID) 5755 pr_err("Virtual processor ID = 0x%04x\n", 5756 vmcs_read16(VIRTUAL_PROCESSOR_ID)); 5757 } 5758 5759 /* 5760 * The guest has exited. See if we can fix it or if we need userspace 5761 * assistance. 5762 */ 5763 static int vmx_handle_exit(struct kvm_vcpu *vcpu) 5764 { 5765 struct vcpu_vmx *vmx = to_vmx(vcpu); 5766 u32 exit_reason = vmx->exit_reason; 5767 u32 vectoring_info = vmx->idt_vectoring_info; 5768 5769 trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX); 5770 5771 /* 5772 * Flush logged GPAs PML buffer, this will make dirty_bitmap more 5773 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before 5774 * querying dirty_bitmap, we only need to kick all vcpus out of guest 5775 * mode as if vcpus is in root mode, the PML buffer must has been 5776 * flushed already. 5777 */ 5778 if (enable_pml) 5779 vmx_flush_pml_buffer(vcpu); 5780 5781 /* If guest state is invalid, start emulating */ 5782 if (vmx->emulation_required) 5783 return handle_invalid_guest_state(vcpu); 5784 5785 if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason)) 5786 return nested_vmx_reflect_vmexit(vcpu, exit_reason); 5787 5788 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { 5789 dump_vmcs(); 5790 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; 5791 vcpu->run->fail_entry.hardware_entry_failure_reason 5792 = exit_reason; 5793 return 0; 5794 } 5795 5796 if (unlikely(vmx->fail)) { 5797 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; 5798 vcpu->run->fail_entry.hardware_entry_failure_reason 5799 = vmcs_read32(VM_INSTRUCTION_ERROR); 5800 return 0; 5801 } 5802 5803 /* 5804 * Note: 5805 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it caused by 5806 * delivery event since it indicates guest is accessing MMIO. 5807 * The vm-exit can be triggered again after return to guest that 5808 * will cause infinite loop. 
	 */
	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
	    (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
	     exit_reason != EXIT_REASON_EPT_VIOLATION &&
	     exit_reason != EXIT_REASON_PML_FULL &&
	     exit_reason != EXIT_REASON_TASK_SWITCH)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
		vcpu->run->internal.ndata = 3;
		vcpu->run->internal.data[0] = vectoring_info;
		vcpu->run->internal.data[1] = exit_reason;
		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
		if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
			vcpu->run->internal.ndata++;
			vcpu->run->internal.data[3] =
				vmcs_read64(GUEST_PHYSICAL_ADDRESS);
		}
		return 0;
	}

	if (unlikely(!enable_vnmi &&
		     vmx->loaded_vmcs->soft_vnmi_blocked)) {
		if (vmx_interrupt_allowed(vcpu)) {
			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
		} else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
			   vcpu->arch.nmi_pending) {
			/*
			 * This CPU doesn't let us detect the end of an
			 * NMI-blocked window if the guest runs with IRQs
			 * disabled.  So we pull the trigger after 1 s of
			 * futile waiting, but inform the user about it.
			 */
			printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
			       "state on VCPU %d after 1 s timeout\n",
			       __func__, vcpu->vcpu_id);
			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
		}
	}

	if (exit_reason < kvm_vmx_max_exit_handlers
	    && kvm_vmx_exit_handlers[exit_reason])
		return kvm_vmx_exit_handlers[exit_reason](vcpu);
	else {
		vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
			    exit_reason);
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
}

/*
 * Software-based L1D cache flush, used when the microcode providing the
 * cache control MSR is not loaded.
 *
 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
 * flushing it requires reading in 64 KiB because the replacement algorithm
 * is not exactly LRU.  This could be sized at runtime via topology
 * information, but as all relevant affected CPUs have a 32 KiB L1D cache
 * there is no point in doing so.
 */
static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{
	int size = PAGE_SIZE << L1D_CACHE_ORDER;

	/*
	 * This code is only executed when the flush mode is 'cond' or
	 * 'always'.
	 */
	if (static_branch_likely(&vmx_l1d_flush_cond)) {
		bool flush_l1d;

		/*
		 * Clear the per-vcpu flush bit, it gets set again
		 * either from vcpu_run() or from one of the unsafe
		 * VMEXIT handlers.
		 */
		flush_l1d = vcpu->arch.l1tf_flush_l1d;
		vcpu->arch.l1tf_flush_l1d = false;

		/*
		 * Clear the per-cpu flush bit, it gets set again from
		 * the interrupt handlers.
5891 */ 5892 flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); 5893 kvm_clear_cpu_l1tf_flush_l1d(); 5894 5895 if (!flush_l1d) 5896 return; 5897 } 5898 5899 vcpu->stat.l1d_flush++; 5900 5901 if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { 5902 wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); 5903 return; 5904 } 5905 5906 asm volatile( 5907 /* First ensure the pages are in the TLB */ 5908 "xorl %%eax, %%eax\n" 5909 ".Lpopulate_tlb:\n\t" 5910 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" 5911 "addl $4096, %%eax\n\t" 5912 "cmpl %%eax, %[size]\n\t" 5913 "jne .Lpopulate_tlb\n\t" 5914 "xorl %%eax, %%eax\n\t" 5915 "cpuid\n\t" 5916 /* Now fill the cache */ 5917 "xorl %%eax, %%eax\n" 5918 ".Lfill_cache:\n" 5919 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" 5920 "addl $64, %%eax\n\t" 5921 "cmpl %%eax, %[size]\n\t" 5922 "jne .Lfill_cache\n\t" 5923 "lfence\n" 5924 :: [flush_pages] "r" (vmx_l1d_flush_pages), 5925 [size] "r" (size) 5926 : "eax", "ebx", "ecx", "edx"); 5927 } 5928 5929 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) 5930 { 5931 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5932 5933 if (is_guest_mode(vcpu) && 5934 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) 5935 return; 5936 5937 if (irr == -1 || tpr < irr) { 5938 vmcs_write32(TPR_THRESHOLD, 0); 5939 return; 5940 } 5941 5942 vmcs_write32(TPR_THRESHOLD, irr); 5943 } 5944 5945 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) 5946 { 5947 u32 sec_exec_control; 5948 5949 if (!lapic_in_kernel(vcpu)) 5950 return; 5951 5952 if (!flexpriority_enabled && 5953 !cpu_has_vmx_virtualize_x2apic_mode()) 5954 return; 5955 5956 /* Postpone execution until vmcs01 is the current VMCS. */ 5957 if (is_guest_mode(vcpu)) { 5958 to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true; 5959 return; 5960 } 5961 5962 sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); 5963 sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 5964 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); 5965 5966 switch (kvm_get_apic_mode(vcpu)) { 5967 case LAPIC_MODE_INVALID: 5968 WARN_ONCE(true, "Invalid local APIC state"); 5969 case LAPIC_MODE_DISABLED: 5970 break; 5971 case LAPIC_MODE_XAPIC: 5972 if (flexpriority_enabled) { 5973 sec_exec_control |= 5974 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 5975 vmx_flush_tlb(vcpu, true); 5976 } 5977 break; 5978 case LAPIC_MODE_X2APIC: 5979 if (cpu_has_vmx_virtualize_x2apic_mode()) 5980 sec_exec_control |= 5981 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 5982 break; 5983 } 5984 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); 5985 5986 vmx_update_msr_bitmap(vcpu); 5987 } 5988 5989 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) 5990 { 5991 if (!is_guest_mode(vcpu)) { 5992 vmcs_write64(APIC_ACCESS_ADDR, hpa); 5993 vmx_flush_tlb(vcpu, true); 5994 } 5995 } 5996 5997 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) 5998 { 5999 u16 status; 6000 u8 old; 6001 6002 if (max_isr == -1) 6003 max_isr = 0; 6004 6005 status = vmcs_read16(GUEST_INTR_STATUS); 6006 old = status >> 8; 6007 if (max_isr != old) { 6008 status &= 0xff; 6009 status |= max_isr << 8; 6010 vmcs_write16(GUEST_INTR_STATUS, status); 6011 } 6012 } 6013 6014 static void vmx_set_rvi(int vector) 6015 { 6016 u16 status; 6017 u8 old; 6018 6019 if (vector == -1) 6020 vector = 0; 6021 6022 status = vmcs_read16(GUEST_INTR_STATUS); 6023 old = (u8)status & 0xff; 6024 if ((u8)vector != old) { 6025 status &= ~0xff; 6026 status |= (u8)vector; 6027 vmcs_write16(GUEST_INTR_STATUS, status); 6028 } 6029 } 6030 6031 static void 
vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) 6032 { 6033 /* 6034 * When running L2, updating RVI is only relevant when 6035 * vmcs12 virtual-interrupt-delivery enabled. 6036 * However, it can be enabled only when L1 also 6037 * intercepts external-interrupts and in that case 6038 * we should not update vmcs02 RVI but instead intercept 6039 * interrupt. Therefore, do nothing when running L2. 6040 */ 6041 if (!is_guest_mode(vcpu)) 6042 vmx_set_rvi(max_irr); 6043 } 6044 6045 static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) 6046 { 6047 struct vcpu_vmx *vmx = to_vmx(vcpu); 6048 int max_irr; 6049 bool max_irr_updated; 6050 6051 WARN_ON(!vcpu->arch.apicv_active); 6052 if (pi_test_on(&vmx->pi_desc)) { 6053 pi_clear_on(&vmx->pi_desc); 6054 /* 6055 * IOMMU can write to PIR.ON, so the barrier matters even on UP. 6056 * But on x86 this is just a compiler barrier anyway. 6057 */ 6058 smp_mb__after_atomic(); 6059 max_irr_updated = 6060 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); 6061 6062 /* 6063 * If we are running L2 and L1 has a new pending interrupt 6064 * which can be injected, we should re-evaluate 6065 * what should be done with this new L1 interrupt. 6066 * If L1 intercepts external-interrupts, we should 6067 * exit from L2 to L1. Otherwise, interrupt should be 6068 * delivered directly to L2. 6069 */ 6070 if (is_guest_mode(vcpu) && max_irr_updated) { 6071 if (nested_exit_on_intr(vcpu)) 6072 kvm_vcpu_exiting_guest_mode(vcpu); 6073 else 6074 kvm_make_request(KVM_REQ_EVENT, vcpu); 6075 } 6076 } else { 6077 max_irr = kvm_lapic_find_highest_irr(vcpu); 6078 } 6079 vmx_hwapic_irr_update(vcpu, max_irr); 6080 return max_irr; 6081 } 6082 6083 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) 6084 { 6085 if (!kvm_vcpu_apicv_active(vcpu)) 6086 return; 6087 6088 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); 6089 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); 6090 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); 6091 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); 6092 } 6093 6094 static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) 6095 { 6096 struct vcpu_vmx *vmx = to_vmx(vcpu); 6097 6098 pi_clear_on(&vmx->pi_desc); 6099 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); 6100 } 6101 6102 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) 6103 { 6104 u32 exit_intr_info = 0; 6105 u16 basic_exit_reason = (u16)vmx->exit_reason; 6106 6107 if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY 6108 || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI)) 6109 return; 6110 6111 if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) 6112 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 6113 vmx->exit_intr_info = exit_intr_info; 6114 6115 /* if exit due to PF check for async PF */ 6116 if (is_page_fault(exit_intr_info)) 6117 vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason(); 6118 6119 /* Handle machine checks before interrupts are enabled */ 6120 if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY || 6121 is_machine_check(exit_intr_info)) 6122 kvm_machine_check(); 6123 6124 /* We need to handle NMIs before interrupts are enabled */ 6125 if (is_nmi(exit_intr_info)) { 6126 kvm_before_interrupt(&vmx->vcpu); 6127 asm("int $2"); 6128 kvm_after_interrupt(&vmx->vcpu); 6129 } 6130 } 6131 6132 static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) 6133 { 6134 u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 6135 6136 if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) 6137 
== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { 6138 unsigned int vector; 6139 unsigned long entry; 6140 gate_desc *desc; 6141 struct vcpu_vmx *vmx = to_vmx(vcpu); 6142 #ifdef CONFIG_X86_64 6143 unsigned long tmp; 6144 #endif 6145 6146 vector = exit_intr_info & INTR_INFO_VECTOR_MASK; 6147 desc = (gate_desc *)vmx->host_idt_base + vector; 6148 entry = gate_offset(desc); 6149 asm volatile( 6150 #ifdef CONFIG_X86_64 6151 "mov %%" _ASM_SP ", %[sp]\n\t" 6152 "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t" 6153 "push $%c[ss]\n\t" 6154 "push %[sp]\n\t" 6155 #endif 6156 "pushf\n\t" 6157 __ASM_SIZE(push) " $%c[cs]\n\t" 6158 CALL_NOSPEC 6159 : 6160 #ifdef CONFIG_X86_64 6161 [sp]"=&r"(tmp), 6162 #endif 6163 ASM_CALL_CONSTRAINT 6164 : 6165 THUNK_TARGET(entry), 6166 [ss]"i"(__KERNEL_DS), 6167 [cs]"i"(__KERNEL_CS) 6168 ); 6169 } 6170 } 6171 STACK_FRAME_NON_STANDARD(vmx_handle_external_intr); 6172 6173 static bool vmx_has_emulated_msr(int index) 6174 { 6175 switch (index) { 6176 case MSR_IA32_SMBASE: 6177 /* 6178 * We cannot do SMM unless we can run the guest in big 6179 * real mode. 6180 */ 6181 return enable_unrestricted_guest || emulate_invalid_guest_state; 6182 case MSR_AMD64_VIRT_SPEC_CTRL: 6183 /* This is AMD only. */ 6184 return false; 6185 default: 6186 return true; 6187 } 6188 } 6189 6190 static bool vmx_pt_supported(void) 6191 { 6192 return pt_mode == PT_MODE_HOST_GUEST; 6193 } 6194 6195 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) 6196 { 6197 u32 exit_intr_info; 6198 bool unblock_nmi; 6199 u8 vector; 6200 bool idtv_info_valid; 6201 6202 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; 6203 6204 if (enable_vnmi) { 6205 if (vmx->loaded_vmcs->nmi_known_unmasked) 6206 return; 6207 /* 6208 * Can't use vmx->exit_intr_info since we're not sure what 6209 * the exit reason is. 6210 */ 6211 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 6212 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; 6213 vector = exit_intr_info & INTR_INFO_VECTOR_MASK; 6214 /* 6215 * SDM 3: 27.7.1.2 (September 2008) 6216 * Re-set bit "block by NMI" before VM entry if vmexit caused by 6217 * a guest IRET fault. 6218 * SDM 3: 23.2.2 (September 2008) 6219 * Bit 12 is undefined in any of the following cases: 6220 * If the VM exit sets the valid bit in the IDT-vectoring 6221 * information field. 6222 * If the VM exit is due to a double fault. 
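		 *
		 * That is why the code below re-sets "block by NMI" only when
		 * the exit interruption info is valid, the vector is not
		 * DF_VECTOR and no event was being delivered; otherwise it
		 * re-reads the NMI-blocking bit directly from
		 * GUEST_INTERRUPTIBILITY_INFO instead of trusting bit 12.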
6223 */ 6224 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && 6225 vector != DF_VECTOR && !idtv_info_valid) 6226 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 6227 GUEST_INTR_STATE_NMI); 6228 else 6229 vmx->loaded_vmcs->nmi_known_unmasked = 6230 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) 6231 & GUEST_INTR_STATE_NMI); 6232 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) 6233 vmx->loaded_vmcs->vnmi_blocked_time += 6234 ktime_to_ns(ktime_sub(ktime_get(), 6235 vmx->loaded_vmcs->entry_time)); 6236 } 6237 6238 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, 6239 u32 idt_vectoring_info, 6240 int instr_len_field, 6241 int error_code_field) 6242 { 6243 u8 vector; 6244 int type; 6245 bool idtv_info_valid; 6246 6247 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; 6248 6249 vcpu->arch.nmi_injected = false; 6250 kvm_clear_exception_queue(vcpu); 6251 kvm_clear_interrupt_queue(vcpu); 6252 6253 if (!idtv_info_valid) 6254 return; 6255 6256 kvm_make_request(KVM_REQ_EVENT, vcpu); 6257 6258 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; 6259 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; 6260 6261 switch (type) { 6262 case INTR_TYPE_NMI_INTR: 6263 vcpu->arch.nmi_injected = true; 6264 /* 6265 * SDM 3: 27.7.1.2 (September 2008) 6266 * Clear bit "block by NMI" before VM entry if a NMI 6267 * delivery faulted. 6268 */ 6269 vmx_set_nmi_mask(vcpu, false); 6270 break; 6271 case INTR_TYPE_SOFT_EXCEPTION: 6272 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 6273 /* fall through */ 6274 case INTR_TYPE_HARD_EXCEPTION: 6275 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { 6276 u32 err = vmcs_read32(error_code_field); 6277 kvm_requeue_exception_e(vcpu, vector, err); 6278 } else 6279 kvm_requeue_exception(vcpu, vector); 6280 break; 6281 case INTR_TYPE_SOFT_INTR: 6282 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); 6283 /* fall through */ 6284 case INTR_TYPE_EXT_INTR: 6285 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); 6286 break; 6287 default: 6288 break; 6289 } 6290 } 6291 6292 static void vmx_complete_interrupts(struct vcpu_vmx *vmx) 6293 { 6294 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, 6295 VM_EXIT_INSTRUCTION_LEN, 6296 IDT_VECTORING_ERROR_CODE); 6297 } 6298 6299 static void vmx_cancel_injection(struct kvm_vcpu *vcpu) 6300 { 6301 __vmx_complete_interrupts(vcpu, 6302 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), 6303 VM_ENTRY_INSTRUCTION_LEN, 6304 VM_ENTRY_EXCEPTION_ERROR_CODE); 6305 6306 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 6307 } 6308 6309 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) 6310 { 6311 int i, nr_msrs; 6312 struct perf_guest_switch_msr *msrs; 6313 6314 msrs = perf_guest_get_msrs(&nr_msrs); 6315 6316 if (!msrs) 6317 return; 6318 6319 for (i = 0; i < nr_msrs; i++) 6320 if (msrs[i].host == msrs[i].guest) 6321 clear_atomic_switch_msr(vmx, msrs[i].msr); 6322 else 6323 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, 6324 msrs[i].host, false); 6325 } 6326 6327 static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val) 6328 { 6329 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val); 6330 if (!vmx->loaded_vmcs->hv_timer_armed) 6331 vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, 6332 PIN_BASED_VMX_PREEMPTION_TIMER); 6333 vmx->loaded_vmcs->hv_timer_armed = true; 6334 } 6335 6336 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) 6337 { 6338 struct vcpu_vmx *vmx = to_vmx(vcpu); 6339 u64 tscl; 6340 u32 delta_tsc; 6341 6342 if (vmx->req_immediate_exit) { 6343 
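		/*
		 * Arming the VMX preemption timer with a value of zero makes
		 * it fire at the first opportunity after VM-entry, which is
		 * how a requested immediate exit is realized here.
		 */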
		vmx_arm_hv_timer(vmx, 0);
		return;
	}

	if (vmx->hv_deadline_tsc != -1) {
		tscl = rdtsc();
		if (vmx->hv_deadline_tsc > tscl)
			/* set_hv_timer ensures the delta fits in 32-bits */
			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
				cpu_preemption_timer_multi);
		else
			delta_tsc = 0;

		vmx_arm_hv_timer(vmx, delta_tsc);
		return;
	}

	if (vmx->loaded_vmcs->hv_timer_armed)
		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
				PIN_BASED_VMX_PREEMPTION_TIMER);
	vmx->loaded_vmcs->hv_timer_armed = false;
}

static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
	unsigned long evmcs_rsp;

	vmx->__launched = vmx->loaded_vmcs->launched;

	evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
		(unsigned long)&current_evmcs->host_rsp : 0;

	if (static_branch_unlikely(&vmx_l1d_should_flush))
		vmx_l1d_flush(vcpu);

	asm(
		/* Store host registers */
		"push %%" _ASM_DX "; push %%" _ASM_BP ";"
		"push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
		"push %%" _ASM_CX " \n\t"
		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
		"cmp %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
		"je 1f \n\t"
		"mov %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
		/* Avoid VMWRITE when Enlightened VMCS is in use */
		"test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
		"jz 2f \n\t"
		"mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
		"jmp 1f \n\t"
		"2: \n\t"
		__ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
		"1: \n\t"
		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */

		/* Reload cr2 if changed */
		"mov %c[cr2](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
		"mov %%cr2, %%" _ASM_DX " \n\t"
		"cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
		"je 3f \n\t"
		"mov %%" _ASM_AX", %%cr2 \n\t"
		"3: \n\t"
		/* Check if vmlaunch or vmresume is needed */
		"cmpl $0, %c[launched](%%" _ASM_CX ") \n\t"
		/* Load guest registers. Don't clobber flags. */
		"mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
		"mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t"
		"mov %c[rdx](%%" _ASM_CX "), %%" _ASM_DX " \n\t"
		"mov %c[rsi](%%" _ASM_CX "), %%" _ASM_SI " \n\t"
		"mov %c[rdi](%%" _ASM_CX "), %%" _ASM_DI " \n\t"
		"mov %c[rbp](%%" _ASM_CX "), %%" _ASM_BP " \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%%" _ASM_CX "), %%r8 \n\t"
		"mov %c[r9](%%" _ASM_CX "), %%r9 \n\t"
		"mov %c[r10](%%" _ASM_CX "), %%r10 \n\t"
		"mov %c[r11](%%" _ASM_CX "), %%r11 \n\t"
		"mov %c[r12](%%" _ASM_CX "), %%r12 \n\t"
		"mov %c[r13](%%" _ASM_CX "), %%r13 \n\t"
		"mov %c[r14](%%" _ASM_CX "), %%r14 \n\t"
		"mov %c[r15](%%" _ASM_CX "), %%r15 \n\t"
#endif
		/* Load guest RCX. This kills the vmx_vcpu pointer! */
		"mov %c[rcx](%%" _ASM_CX "), %%" _ASM_CX " \n\t"

		/* Enter guest mode */
		"call vmx_vmenter\n\t"

		/* Save guest's RCX to the stack placeholder (see above) */
		"mov %%" _ASM_CX ", %c[wordsize](%%" _ASM_SP ") \n\t"

		/* Load host's RCX, i.e.
the vmx_vcpu pointer */ 6433 "pop %%" _ASM_CX " \n\t" 6434 6435 /* Set vmx->fail based on EFLAGS.{CF,ZF} */ 6436 "setbe %c[fail](%%" _ASM_CX ")\n\t" 6437 6438 /* Save all guest registers, including RCX from the stack */ 6439 "mov %%" _ASM_AX ", %c[rax](%%" _ASM_CX ") \n\t" 6440 "mov %%" _ASM_BX ", %c[rbx](%%" _ASM_CX ") \n\t" 6441 __ASM_SIZE(pop) " %c[rcx](%%" _ASM_CX ") \n\t" 6442 "mov %%" _ASM_DX ", %c[rdx](%%" _ASM_CX ") \n\t" 6443 "mov %%" _ASM_SI ", %c[rsi](%%" _ASM_CX ") \n\t" 6444 "mov %%" _ASM_DI ", %c[rdi](%%" _ASM_CX ") \n\t" 6445 "mov %%" _ASM_BP ", %c[rbp](%%" _ASM_CX ") \n\t" 6446 #ifdef CONFIG_X86_64 6447 "mov %%r8, %c[r8](%%" _ASM_CX ") \n\t" 6448 "mov %%r9, %c[r9](%%" _ASM_CX ") \n\t" 6449 "mov %%r10, %c[r10](%%" _ASM_CX ") \n\t" 6450 "mov %%r11, %c[r11](%%" _ASM_CX ") \n\t" 6451 "mov %%r12, %c[r12](%%" _ASM_CX ") \n\t" 6452 "mov %%r13, %c[r13](%%" _ASM_CX ") \n\t" 6453 "mov %%r14, %c[r14](%%" _ASM_CX ") \n\t" 6454 "mov %%r15, %c[r15](%%" _ASM_CX ") \n\t" 6455 /* 6456 * Clear host registers marked as clobbered to prevent 6457 * speculative use. 6458 */ 6459 "xor %%r8d, %%r8d \n\t" 6460 "xor %%r9d, %%r9d \n\t" 6461 "xor %%r10d, %%r10d \n\t" 6462 "xor %%r11d, %%r11d \n\t" 6463 "xor %%r12d, %%r12d \n\t" 6464 "xor %%r13d, %%r13d \n\t" 6465 "xor %%r14d, %%r14d \n\t" 6466 "xor %%r15d, %%r15d \n\t" 6467 #endif 6468 "mov %%cr2, %%" _ASM_AX " \n\t" 6469 "mov %%" _ASM_AX ", %c[cr2](%%" _ASM_CX ") \n\t" 6470 6471 "xor %%eax, %%eax \n\t" 6472 "xor %%ebx, %%ebx \n\t" 6473 "xor %%esi, %%esi \n\t" 6474 "xor %%edi, %%edi \n\t" 6475 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" 6476 : ASM_CALL_CONSTRAINT 6477 : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp), 6478 [launched]"i"(offsetof(struct vcpu_vmx, __launched)), 6479 [fail]"i"(offsetof(struct vcpu_vmx, fail)), 6480 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), 6481 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), 6482 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), 6483 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), 6484 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), 6485 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), 6486 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), 6487 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), 6488 #ifdef CONFIG_X86_64 6489 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), 6490 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), 6491 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), 6492 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), 6493 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), 6494 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), 6495 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), 6496 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), 6497 #endif 6498 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), 6499 [wordsize]"i"(sizeof(ulong)) 6500 : "cc", "memory" 6501 #ifdef CONFIG_X86_64 6502 , "rax", "rbx", "rdi" 6503 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 6504 #else 6505 , "eax", "ebx", "edi" 6506 #endif 6507 ); 6508 } 6509 STACK_FRAME_NON_STANDARD(__vmx_vcpu_run); 6510 6511 static void vmx_vcpu_run(struct kvm_vcpu *vcpu) 6512 { 6513 struct vcpu_vmx *vmx = to_vmx(vcpu); 6514 unsigned long cr3, cr4; 6515 6516 /* Record the guest's net vcpu time for enforced NMI injections. 
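	 * The entry_time stamp taken below is accumulated into
	 * vnmi_blocked_time by vmx_recover_nmi_blocking(); vmx_handle_exit()
	 * uses that total to force the vCPU out of the software-NMI-blocked
	 * state after roughly one second.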
*/ 6517 if (unlikely(!enable_vnmi && 6518 vmx->loaded_vmcs->soft_vnmi_blocked)) 6519 vmx->loaded_vmcs->entry_time = ktime_get(); 6520 6521 /* Don't enter VMX if guest state is invalid, let the exit handler 6522 start emulation until we arrive back to a valid state */ 6523 if (vmx->emulation_required) 6524 return; 6525 6526 if (vmx->ple_window_dirty) { 6527 vmx->ple_window_dirty = false; 6528 vmcs_write32(PLE_WINDOW, vmx->ple_window); 6529 } 6530 6531 if (vmx->nested.need_vmcs12_sync) 6532 nested_sync_from_vmcs12(vcpu); 6533 6534 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) 6535 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); 6536 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) 6537 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); 6538 6539 cr3 = __get_current_cr3_fast(); 6540 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 6541 vmcs_writel(HOST_CR3, cr3); 6542 vmx->loaded_vmcs->host_state.cr3 = cr3; 6543 } 6544 6545 cr4 = cr4_read_shadow(); 6546 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 6547 vmcs_writel(HOST_CR4, cr4); 6548 vmx->loaded_vmcs->host_state.cr4 = cr4; 6549 } 6550 6551 /* When single-stepping over STI and MOV SS, we must clear the 6552 * corresponding interruptibility bits in the guest state. Otherwise 6553 * vmentry fails as it then expects bit 14 (BS) in pending debug 6554 * exceptions being set, but that's not correct for the guest debugging 6555 * case. */ 6556 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 6557 vmx_set_interrupt_shadow(vcpu, 0); 6558 6559 if (static_cpu_has(X86_FEATURE_PKU) && 6560 kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && 6561 vcpu->arch.pkru != vmx->host_pkru) 6562 __write_pkru(vcpu->arch.pkru); 6563 6564 pt_guest_enter(vmx); 6565 6566 atomic_switch_perf_msrs(vmx); 6567 6568 vmx_update_hv_timer(vcpu); 6569 6570 /* 6571 * If this vCPU has touched SPEC_CTRL, restore the guest's value if 6572 * it's non-zero. Since vmentry is serialising on affected CPUs, there 6573 * is no need to worry about the conditional branch over the wrmsr 6574 * being speculatively taken. 6575 */ 6576 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); 6577 6578 __vmx_vcpu_run(vcpu, vmx); 6579 6580 /* 6581 * We do not use IBRS in the kernel. If this vCPU has used the 6582 * SPEC_CTRL MSR it may have left it on; save the value and 6583 * turn it off. This is much more efficient than blindly adding 6584 * it to the atomic save/restore list. Especially as the former 6585 * (Saving guest MSRs on vmexit) doesn't even exist in KVM. 6586 * 6587 * For non-nested case: 6588 * If the L01 MSR bitmap does not intercept the MSR, then we need to 6589 * save it. 6590 * 6591 * For nested case: 6592 * If the L02 MSR bitmap does not intercept the MSR, then we need to 6593 * save it. 6594 */ 6595 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) 6596 vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); 6597 6598 x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); 6599 6600 /* Eliminate branch target predictions from guest mode */ 6601 vmexit_fill_RSB(); 6602 6603 /* All fields are clean at this point */ 6604 if (static_branch_unlikely(&enable_evmcs)) 6605 current_evmcs->hv_clean_fields |= 6606 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 6607 6608 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. 
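	 * (vmx->host_debugctlmsr is assumed to hold the host value that was
	 * cached when the vCPU was last loaded.)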
Restore it if needed */ 6609 if (vmx->host_debugctlmsr) 6610 update_debugctlmsr(vmx->host_debugctlmsr); 6611 6612 #ifndef CONFIG_X86_64 6613 /* 6614 * The sysexit path does not restore ds/es, so we must set them to 6615 * a reasonable value ourselves. 6616 * 6617 * We can't defer this to vmx_prepare_switch_to_host() since that 6618 * function may be executed in interrupt context, which saves and 6619 * restore segments around it, nullifying its effect. 6620 */ 6621 loadsegment(ds, __USER_DS); 6622 loadsegment(es, __USER_DS); 6623 #endif 6624 6625 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) 6626 | (1 << VCPU_EXREG_RFLAGS) 6627 | (1 << VCPU_EXREG_PDPTR) 6628 | (1 << VCPU_EXREG_SEGMENTS) 6629 | (1 << VCPU_EXREG_CR3)); 6630 vcpu->arch.regs_dirty = 0; 6631 6632 pt_guest_exit(vmx); 6633 6634 /* 6635 * eager fpu is enabled if PKEY is supported and CR4 is switched 6636 * back on host, so it is safe to read guest PKRU from current 6637 * XSAVE. 6638 */ 6639 if (static_cpu_has(X86_FEATURE_PKU) && 6640 kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) { 6641 vcpu->arch.pkru = __read_pkru(); 6642 if (vcpu->arch.pkru != vmx->host_pkru) 6643 __write_pkru(vmx->host_pkru); 6644 } 6645 6646 vmx->nested.nested_run_pending = 0; 6647 vmx->idt_vectoring_info = 0; 6648 6649 vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON); 6650 if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) 6651 return; 6652 6653 vmx->loaded_vmcs->launched = 1; 6654 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 6655 6656 vmx_complete_atomic_exit(vmx); 6657 vmx_recover_nmi_blocking(vmx); 6658 vmx_complete_interrupts(vmx); 6659 } 6660 6661 static struct kvm *vmx_vm_alloc(void) 6662 { 6663 struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx)); 6664 return &kvm_vmx->kvm; 6665 } 6666 6667 static void vmx_vm_free(struct kvm *kvm) 6668 { 6669 vfree(to_kvm_vmx(kvm)); 6670 } 6671 6672 static void vmx_free_vcpu(struct kvm_vcpu *vcpu) 6673 { 6674 struct vcpu_vmx *vmx = to_vmx(vcpu); 6675 6676 if (enable_pml) 6677 vmx_destroy_pml_buffer(vmx); 6678 free_vpid(vmx->vpid); 6679 leave_guest_mode(vcpu); 6680 nested_vmx_free_vcpu(vcpu); 6681 free_loaded_vmcs(vmx->loaded_vmcs); 6682 kfree(vmx->guest_msrs); 6683 kvm_vcpu_uninit(vcpu); 6684 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); 6685 kmem_cache_free(kvm_vcpu_cache, vmx); 6686 } 6687 6688 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) 6689 { 6690 int err; 6691 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); 6692 unsigned long *msr_bitmap; 6693 int cpu; 6694 6695 if (!vmx) 6696 return ERR_PTR(-ENOMEM); 6697 6698 vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL); 6699 if (!vmx->vcpu.arch.guest_fpu) { 6700 printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n"); 6701 err = -ENOMEM; 6702 goto free_partial_vcpu; 6703 } 6704 6705 vmx->vpid = allocate_vpid(); 6706 6707 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); 6708 if (err) 6709 goto free_vcpu; 6710 6711 err = -ENOMEM; 6712 6713 /* 6714 * If PML is turned on, failure on enabling PML just results in failure 6715 * of creating the vcpu, therefore we can simplify PML logic (by 6716 * avoiding dealing with cases, such as enabling PML partially on vcpus 6717 * for the guest, etc. 
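	 * In other words, if the alloc_page() below fails, creation of the
	 * vcpu fails as a whole and the common error path frees whatever has
	 * been set up so far.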
6718 */ 6719 if (enable_pml) { 6720 vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO); 6721 if (!vmx->pml_pg) 6722 goto uninit_vcpu; 6723 } 6724 6725 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); 6726 BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) 6727 > PAGE_SIZE); 6728 6729 if (!vmx->guest_msrs) 6730 goto free_pml; 6731 6732 err = alloc_loaded_vmcs(&vmx->vmcs01); 6733 if (err < 0) 6734 goto free_msrs; 6735 6736 msr_bitmap = vmx->vmcs01.msr_bitmap; 6737 vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R); 6738 vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW); 6739 vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW); 6740 vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); 6741 vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); 6742 vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); 6743 vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); 6744 vmx->msr_bitmap_mode = 0; 6745 6746 vmx->loaded_vmcs = &vmx->vmcs01; 6747 cpu = get_cpu(); 6748 vmx_vcpu_load(&vmx->vcpu, cpu); 6749 vmx->vcpu.cpu = cpu; 6750 vmx_vcpu_setup(vmx); 6751 vmx_vcpu_put(&vmx->vcpu); 6752 put_cpu(); 6753 if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { 6754 err = alloc_apic_access_page(kvm); 6755 if (err) 6756 goto free_vmcs; 6757 } 6758 6759 if (enable_ept && !enable_unrestricted_guest) { 6760 err = init_rmode_identity_map(kvm); 6761 if (err) 6762 goto free_vmcs; 6763 } 6764 6765 if (nested) 6766 nested_vmx_setup_ctls_msrs(&vmx->nested.msrs, 6767 vmx_capability.ept, 6768 kvm_vcpu_apicv_active(&vmx->vcpu)); 6769 else 6770 memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs)); 6771 6772 vmx->nested.posted_intr_nv = -1; 6773 vmx->nested.current_vmptr = -1ull; 6774 6775 vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; 6776 6777 /* 6778 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR 6779 * or POSTED_INTR_WAKEUP_VECTOR. 6780 */ 6781 vmx->pi_desc.nv = POSTED_INTR_VECTOR; 6782 vmx->pi_desc.sn = 1; 6783 6784 vmx->ept_pointer = INVALID_PAGE; 6785 6786 return &vmx->vcpu; 6787 6788 free_vmcs: 6789 free_loaded_vmcs(vmx->loaded_vmcs); 6790 free_msrs: 6791 kfree(vmx->guest_msrs); 6792 free_pml: 6793 vmx_destroy_pml_buffer(vmx); 6794 uninit_vcpu: 6795 kvm_vcpu_uninit(&vmx->vcpu); 6796 free_vcpu: 6797 free_vpid(vmx->vpid); 6798 kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu); 6799 free_partial_vcpu: 6800 kmem_cache_free(kvm_vcpu_cache, vmx); 6801 return ERR_PTR(err); 6802 } 6803 6804 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" 6805 #define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. 
See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" 6806 6807 static int vmx_vm_init(struct kvm *kvm) 6808 { 6809 spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock); 6810 6811 if (!ple_gap) 6812 kvm->arch.pause_in_guest = true; 6813 6814 if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { 6815 switch (l1tf_mitigation) { 6816 case L1TF_MITIGATION_OFF: 6817 case L1TF_MITIGATION_FLUSH_NOWARN: 6818 /* 'I explicitly don't care' is set */ 6819 break; 6820 case L1TF_MITIGATION_FLUSH: 6821 case L1TF_MITIGATION_FLUSH_NOSMT: 6822 case L1TF_MITIGATION_FULL: 6823 /* 6824 * Warn upon starting the first VM in a potentially 6825 * insecure environment. 6826 */ 6827 if (sched_smt_active()) 6828 pr_warn_once(L1TF_MSG_SMT); 6829 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) 6830 pr_warn_once(L1TF_MSG_L1D); 6831 break; 6832 case L1TF_MITIGATION_FULL_FORCE: 6833 /* Flush is enforced */ 6834 break; 6835 } 6836 } 6837 return 0; 6838 } 6839 6840 static void __init vmx_check_processor_compat(void *rtn) 6841 { 6842 struct vmcs_config vmcs_conf; 6843 struct vmx_capability vmx_cap; 6844 6845 *(int *)rtn = 0; 6846 if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) 6847 *(int *)rtn = -EIO; 6848 if (nested) 6849 nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept, 6850 enable_apicv); 6851 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { 6852 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", 6853 smp_processor_id()); 6854 *(int *)rtn = -EIO; 6855 } 6856 } 6857 6858 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) 6859 { 6860 u8 cache; 6861 u64 ipat = 0; 6862 6863 /* For VT-d and EPT combination 6864 * 1. MMIO: always map as UC 6865 * 2. EPT with VT-d: 6866 * a. VT-d without snooping control feature: can't guarantee the 6867 * result, try to trust guest. 6868 * b. VT-d with snooping control feature: snooping control feature of 6869 * VT-d engine can guarantee the cache correctness. Just set it 6870 * to WB to keep consistent with host. So the same as item 3. 6871 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep 6872 * consistent with host MTRR 6873 */ 6874 if (is_mmio) { 6875 cache = MTRR_TYPE_UNCACHABLE; 6876 goto exit; 6877 } 6878 6879 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { 6880 ipat = VMX_EPT_IPAT_BIT; 6881 cache = MTRR_TYPE_WRBACK; 6882 goto exit; 6883 } 6884 6885 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { 6886 ipat = VMX_EPT_IPAT_BIT; 6887 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 6888 cache = MTRR_TYPE_WRBACK; 6889 else 6890 cache = MTRR_TYPE_UNCACHABLE; 6891 goto exit; 6892 } 6893 6894 cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); 6895 6896 exit: 6897 return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat; 6898 } 6899 6900 static int vmx_get_lpage_level(void) 6901 { 6902 if (enable_ept && !cpu_has_vmx_ept_1g_page()) 6903 return PT_DIRECTORY_LEVEL; 6904 else 6905 /* For shadow and EPT supported 1GB page */ 6906 return PT_PDPE_LEVEL; 6907 } 6908 6909 static void vmcs_set_secondary_exec_control(u32 new_ctl) 6910 { 6911 /* 6912 * These bits in the secondary execution controls field 6913 * are dynamic, the others are mostly based on the hypervisor 6914 * architecture and the guest's CPUID. Do not touch the 6915 * dynamic bits. 
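	 * VIRTUALIZE_APIC_ACCESSES and VIRTUALIZE_X2APIC_MODE, for example,
	 * are toggled at run time by vmx_set_virtual_apic_mode(), so blindly
	 * rewriting them here could undo an APIC mode change.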
6916 */ 6917 u32 mask = 6918 SECONDARY_EXEC_SHADOW_VMCS | 6919 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 6920 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 6921 SECONDARY_EXEC_DESC; 6922 6923 u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); 6924 6925 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, 6926 (new_ctl & ~mask) | (cur_ctl & mask)); 6927 } 6928 6929 /* 6930 * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits 6931 * (indicating "allowed-1") if they are supported in the guest's CPUID. 6932 */ 6933 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) 6934 { 6935 struct vcpu_vmx *vmx = to_vmx(vcpu); 6936 struct kvm_cpuid_entry2 *entry; 6937 6938 vmx->nested.msrs.cr0_fixed1 = 0xffffffff; 6939 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; 6940 6941 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \ 6942 if (entry && (entry->_reg & (_cpuid_mask))) \ 6943 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ 6944 } while (0) 6945 6946 entry = kvm_find_cpuid_entry(vcpu, 0x1, 0); 6947 cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME)); 6948 cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME)); 6949 cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC)); 6950 cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE)); 6951 cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE)); 6952 cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE)); 6953 cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE)); 6954 cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE)); 6955 cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR)); 6956 cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM)); 6957 cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX)); 6958 cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX)); 6959 cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID)); 6960 cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE)); 6961 6962 entry = kvm_find_cpuid_entry(vcpu, 0x7, 0); 6963 cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE)); 6964 cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP)); 6965 cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP)); 6966 cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU)); 6967 cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP)); 6968 6969 #undef cr4_fixed1_update 6970 } 6971 6972 static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu) 6973 { 6974 struct vcpu_vmx *vmx = to_vmx(vcpu); 6975 6976 if (kvm_mpx_supported()) { 6977 bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX); 6978 6979 if (mpx_enabled) { 6980 vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; 6981 vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; 6982 } else { 6983 vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS; 6984 vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS; 6985 } 6986 } 6987 } 6988 6989 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) 6990 { 6991 struct vcpu_vmx *vmx = to_vmx(vcpu); 6992 struct kvm_cpuid_entry2 *best = NULL; 6993 int i; 6994 6995 for (i = 0; i < PT_CPUID_LEAVES; i++) { 6996 best = kvm_find_cpuid_entry(vcpu, 0x14, i); 6997 if (!best) 6998 return; 6999 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax; 7000 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx; 7001 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx; 7002 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx; 7003 } 7004 7005 /* Get the number of configurable Address 
Ranges for filtering */ 7006 vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps, 7007 PT_CAP_num_address_ranges); 7008 7009 /* Initialize and clear the no dependency bits */ 7010 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS | 7011 RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC); 7012 7013 /* 7014 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise 7015 * will inject an #GP 7016 */ 7017 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering)) 7018 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN; 7019 7020 /* 7021 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and 7022 * PSBFreq can be set 7023 */ 7024 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc)) 7025 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC | 7026 RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ); 7027 7028 /* 7029 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn BranchEn and 7030 * MTCFreq can be set 7031 */ 7032 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc)) 7033 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN | 7034 RTIT_CTL_BRANCH_EN | RTIT_CTL_MTC_RANGE); 7035 7036 /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */ 7037 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite)) 7038 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW | 7039 RTIT_CTL_PTW_EN); 7040 7041 /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */ 7042 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace)) 7043 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN; 7044 7045 /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */ 7046 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output)) 7047 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA; 7048 7049 /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabircEn can be set */ 7050 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys)) 7051 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; 7052 7053 /* unmask address range configure area */ 7054 for (i = 0; i < vmx->pt_desc.addr_range; i++) 7055 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); 7056 } 7057 7058 static void vmx_cpuid_update(struct kvm_vcpu *vcpu) 7059 { 7060 struct vcpu_vmx *vmx = to_vmx(vcpu); 7061 7062 if (cpu_has_secondary_exec_ctrls()) { 7063 vmx_compute_secondary_exec_control(vmx); 7064 vmcs_set_secondary_exec_control(vmx->secondary_exec_control); 7065 } 7066 7067 if (nested_vmx_allowed(vcpu)) 7068 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= 7069 FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; 7070 else 7071 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= 7072 ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; 7073 7074 if (nested_vmx_allowed(vcpu)) { 7075 nested_vmx_cr_fixed1_bits_update(vcpu); 7076 nested_vmx_entry_exit_ctls_update(vcpu); 7077 } 7078 7079 if (boot_cpu_has(X86_FEATURE_INTEL_PT) && 7080 guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT)) 7081 update_intel_pt_cfg(vcpu); 7082 } 7083 7084 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) 7085 { 7086 if (func == 1 && nested) 7087 entry->ecx |= bit(X86_FEATURE_VMX); 7088 } 7089 7090 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) 7091 { 7092 to_vmx(vcpu)->req_immediate_exit = true; 7093 } 7094 7095 static int vmx_check_intercept(struct kvm_vcpu *vcpu, 7096 struct x86_instruction_info *info, 7097 enum x86_intercept_stage stage) 7098 { 7099 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 7100 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; 7101 7102 /* 7103 * RDPID causes #UD if disabled through secondary execution 
controls. 7104 * Because it is marked as EmulateOnUD, we need to intercept it here. 7105 */ 7106 if (info->intercept == x86_intercept_rdtscp && 7107 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) { 7108 ctxt->exception.vector = UD_VECTOR; 7109 ctxt->exception.error_code_valid = false; 7110 return X86EMUL_PROPAGATE_FAULT; 7111 } 7112 7113 /* TODO: check more intercepts... */ 7114 return X86EMUL_CONTINUE; 7115 } 7116 7117 #ifdef CONFIG_X86_64 7118 /* (a << shift) / divisor, return 1 if overflow otherwise 0 */ 7119 static inline int u64_shl_div_u64(u64 a, unsigned int shift, 7120 u64 divisor, u64 *result) 7121 { 7122 u64 low = a << shift, high = a >> (64 - shift); 7123 7124 /* To avoid the overflow on divq */ 7125 if (high >= divisor) 7126 return 1; 7127 7128 /* Low hold the result, high hold rem which is discarded */ 7129 asm("divq %2\n\t" : "=a" (low), "=d" (high) : 7130 "rm" (divisor), "0" (low), "1" (high)); 7131 *result = low; 7132 7133 return 0; 7134 } 7135 7136 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) 7137 { 7138 struct vcpu_vmx *vmx; 7139 u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles; 7140 7141 if (kvm_mwait_in_guest(vcpu->kvm)) 7142 return -EOPNOTSUPP; 7143 7144 vmx = to_vmx(vcpu); 7145 tscl = rdtsc(); 7146 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); 7147 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; 7148 lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns); 7149 7150 if (delta_tsc > lapic_timer_advance_cycles) 7151 delta_tsc -= lapic_timer_advance_cycles; 7152 else 7153 delta_tsc = 0; 7154 7155 /* Convert to host delta tsc if tsc scaling is enabled */ 7156 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && 7157 u64_shl_div_u64(delta_tsc, 7158 kvm_tsc_scaling_ratio_frac_bits, 7159 vcpu->arch.tsc_scaling_ratio, 7160 &delta_tsc)) 7161 return -ERANGE; 7162 7163 /* 7164 * If the delta tsc can't fit in the 32 bit after the multi shift, 7165 * we can't use the preemption timer. 7166 * It's possible that it fits on later vmentries, but checking 7167 * on every vmentry is costly so we just use an hrtimer. 7168 */ 7169 if (delta_tsc >> (cpu_preemption_timer_multi + 32)) 7170 return -ERANGE; 7171 7172 vmx->hv_deadline_tsc = tscl + delta_tsc; 7173 return delta_tsc == 0; 7174 } 7175 7176 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) 7177 { 7178 to_vmx(vcpu)->hv_deadline_tsc = -1; 7179 } 7180 #endif 7181 7182 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) 7183 { 7184 if (!kvm_pause_in_guest(vcpu->kvm)) 7185 shrink_ple_window(vcpu); 7186 } 7187 7188 static void vmx_slot_enable_log_dirty(struct kvm *kvm, 7189 struct kvm_memory_slot *slot) 7190 { 7191 kvm_mmu_slot_leaf_clear_dirty(kvm, slot); 7192 kvm_mmu_slot_largepage_remove_write_access(kvm, slot); 7193 } 7194 7195 static void vmx_slot_disable_log_dirty(struct kvm *kvm, 7196 struct kvm_memory_slot *slot) 7197 { 7198 kvm_mmu_slot_set_dirty(kvm, slot); 7199 } 7200 7201 static void vmx_flush_log_dirty(struct kvm *kvm) 7202 { 7203 kvm_flush_pml_buffers(kvm); 7204 } 7205 7206 static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) 7207 { 7208 struct vmcs12 *vmcs12; 7209 struct vcpu_vmx *vmx = to_vmx(vcpu); 7210 gpa_t gpa; 7211 struct page *page = NULL; 7212 u64 *pml_address; 7213 7214 if (is_guest_mode(vcpu)) { 7215 WARN_ON_ONCE(vmx->nested.pml_full); 7216 7217 /* 7218 * Check if PML is enabled for the nested guest. 7219 * Whether eptp bit 6 is set is already checked 7220 * as part of A/D emulation. 
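		 *
		 * If the nested PML index is out of range, pml_full is
		 * recorded and 1 is returned, presumably so the caller can
		 * synthesize a PML-full exit for L1 instead of logging the
		 * GPA on its behalf.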
7221 */ 7222 vmcs12 = get_vmcs12(vcpu); 7223 if (!nested_cpu_has_pml(vmcs12)) 7224 return 0; 7225 7226 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { 7227 vmx->nested.pml_full = true; 7228 return 1; 7229 } 7230 7231 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; 7232 7233 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address); 7234 if (is_error_page(page)) 7235 return 0; 7236 7237 pml_address = kmap(page); 7238 pml_address[vmcs12->guest_pml_index--] = gpa; 7239 kunmap(page); 7240 kvm_release_page_clean(page); 7241 } 7242 7243 return 0; 7244 } 7245 7246 static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, 7247 struct kvm_memory_slot *memslot, 7248 gfn_t offset, unsigned long mask) 7249 { 7250 kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); 7251 } 7252 7253 static void __pi_post_block(struct kvm_vcpu *vcpu) 7254 { 7255 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 7256 struct pi_desc old, new; 7257 unsigned int dest; 7258 7259 do { 7260 old.control = new.control = pi_desc->control; 7261 WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR, 7262 "Wakeup handler not enabled while the VCPU is blocked\n"); 7263 7264 dest = cpu_physical_id(vcpu->cpu); 7265 7266 if (x2apic_enabled()) 7267 new.ndst = dest; 7268 else 7269 new.ndst = (dest << 8) & 0xFF00; 7270 7271 /* set 'NV' to 'notification vector' */ 7272 new.nv = POSTED_INTR_VECTOR; 7273 } while (cmpxchg64(&pi_desc->control, old.control, 7274 new.control) != old.control); 7275 7276 if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) { 7277 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); 7278 list_del(&vcpu->blocked_vcpu_list); 7279 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); 7280 vcpu->pre_pcpu = -1; 7281 } 7282 } 7283 7284 /* 7285 * This routine does the following things for vCPU which is going 7286 * to be blocked if VT-d PI is enabled. 7287 * - Store the vCPU to the wakeup list, so when interrupts happen 7288 * we can find the right vCPU to wake up. 7289 * - Change the Posted-interrupt descriptor as below: 7290 * 'NDST' <-- vcpu->pre_pcpu 7291 * 'NV' <-- POSTED_INTR_WAKEUP_VECTOR 7292 * - If 'ON' is set during this process, which means at least one 7293 * interrupt is posted for this vCPU, we cannot block it, in 7294 * this case, return 1, otherwise, return 0. 7295 * 7296 */ 7297 static int pi_pre_block(struct kvm_vcpu *vcpu) 7298 { 7299 unsigned int dest; 7300 struct pi_desc old, new; 7301 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 7302 7303 if (!kvm_arch_has_assigned_device(vcpu->kvm) || 7304 !irq_remapping_cap(IRQ_POSTING_CAP) || 7305 !kvm_vcpu_apicv_active(vcpu)) 7306 return 0; 7307 7308 WARN_ON(irqs_disabled()); 7309 local_irq_disable(); 7310 if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) { 7311 vcpu->pre_pcpu = vcpu->cpu; 7312 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); 7313 list_add_tail(&vcpu->blocked_vcpu_list, 7314 &per_cpu(blocked_vcpu_on_cpu, 7315 vcpu->pre_pcpu)); 7316 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); 7317 } 7318 7319 do { 7320 old.control = new.control = pi_desc->control; 7321 7322 WARN((pi_desc->sn == 1), 7323 "Warning: SN field of posted-interrupts " 7324 "is set before blocking\n"); 7325 7326 /* 7327 * Since vCPU can be preempted during this process, 7328 * vcpu->cpu could be different with pre_pcpu, we 7329 * need to set pre_pcpu as the destination of wakeup 7330 * notification event, then we can find the right vCPU 7331 * to wakeup in wakeup handler if interrupts happen 7332 * when the vCPU is in blocked state. 
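		 *
		 * Note the NDST encoding below: in xAPIC mode the 8-bit APIC
		 * ID lives in bits 15:8 of the field (hence the
		 * "(dest << 8) & 0xFF00" masking), while x2APIC mode uses the
		 * full 32-bit ID directly.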
7333 */ 7334 dest = cpu_physical_id(vcpu->pre_pcpu); 7335 7336 if (x2apic_enabled()) 7337 new.ndst = dest; 7338 else 7339 new.ndst = (dest << 8) & 0xFF00; 7340 7341 /* set 'NV' to 'wakeup vector' */ 7342 new.nv = POSTED_INTR_WAKEUP_VECTOR; 7343 } while (cmpxchg64(&pi_desc->control, old.control, 7344 new.control) != old.control); 7345 7346 /* We should not block the vCPU if an interrupt is posted for it. */ 7347 if (pi_test_on(pi_desc) == 1) 7348 __pi_post_block(vcpu); 7349 7350 local_irq_enable(); 7351 return (vcpu->pre_pcpu == -1); 7352 } 7353 7354 static int vmx_pre_block(struct kvm_vcpu *vcpu) 7355 { 7356 if (pi_pre_block(vcpu)) 7357 return 1; 7358 7359 if (kvm_lapic_hv_timer_in_use(vcpu)) 7360 kvm_lapic_switch_to_sw_timer(vcpu); 7361 7362 return 0; 7363 } 7364 7365 static void pi_post_block(struct kvm_vcpu *vcpu) 7366 { 7367 if (vcpu->pre_pcpu == -1) 7368 return; 7369 7370 WARN_ON(irqs_disabled()); 7371 local_irq_disable(); 7372 __pi_post_block(vcpu); 7373 local_irq_enable(); 7374 } 7375 7376 static void vmx_post_block(struct kvm_vcpu *vcpu) 7377 { 7378 if (kvm_x86_ops->set_hv_timer) 7379 kvm_lapic_switch_to_hv_timer(vcpu); 7380 7381 pi_post_block(vcpu); 7382 } 7383 7384 /* 7385 * vmx_update_pi_irte - set IRTE for Posted-Interrupts 7386 * 7387 * @kvm: kvm 7388 * @host_irq: host irq of the interrupt 7389 * @guest_irq: gsi of the interrupt 7390 * @set: set or unset PI 7391 * returns 0 on success, < 0 on failure 7392 */ 7393 static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, 7394 uint32_t guest_irq, bool set) 7395 { 7396 struct kvm_kernel_irq_routing_entry *e; 7397 struct kvm_irq_routing_table *irq_rt; 7398 struct kvm_lapic_irq irq; 7399 struct kvm_vcpu *vcpu; 7400 struct vcpu_data vcpu_info; 7401 int idx, ret = 0; 7402 7403 if (!kvm_arch_has_assigned_device(kvm) || 7404 !irq_remapping_cap(IRQ_POSTING_CAP) || 7405 !kvm_vcpu_apicv_active(kvm->vcpus[0])) 7406 return 0; 7407 7408 idx = srcu_read_lock(&kvm->irq_srcu); 7409 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); 7410 if (guest_irq >= irq_rt->nr_rt_entries || 7411 hlist_empty(&irq_rt->map[guest_irq])) { 7412 pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", 7413 guest_irq, irq_rt->nr_rt_entries); 7414 goto out; 7415 } 7416 7417 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { 7418 if (e->type != KVM_IRQ_ROUTING_MSI) 7419 continue; 7420 /* 7421 * VT-d PI cannot support posting multicast/broadcast 7422 * interrupts to a vCPU, we still use interrupt remapping 7423 * for these kind of interrupts. 7424 * 7425 * For lowest-priority interrupts, we only support 7426 * those with single CPU as the destination, e.g. user 7427 * configures the interrupts via /proc/irq or uses 7428 * irqbalance to make the interrupts single-CPU. 7429 * 7430 * We will support full lowest-priority interrupt later. 7431 */ 7432 7433 kvm_set_msi_irq(kvm, e, &irq); 7434 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) { 7435 /* 7436 * Make sure the IRTE is in remapped mode if 7437 * we don't handle it in posted mode. 
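			 *
			 * Passing NULL to irq_set_vcpu_affinity() reverts the
			 * IRTE to remapped mode; passing a vcpu_data pointer
			 * (as done further below when 'set' is true) switches
			 * it to posted mode.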

/*
 * vmx_update_pi_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: the VM whose routing is being updated
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 *
 * Returns 0 on success, < 0 on failure.
 */
static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu;
	struct vcpu_data vcpu_info;
	int idx, ret = 0;

	if (!kvm_arch_has_assigned_device(kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
	    !kvm_vcpu_apicv_active(kvm->vcpus[0]))
		return 0;

	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	if (guest_irq >= irq_rt->nr_rt_entries ||
	    hlist_empty(&irq_rt->map[guest_irq])) {
		pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
			     guest_irq, irq_rt->nr_rt_entries);
		goto out;
	}

	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
		if (e->type != KVM_IRQ_ROUTING_MSI)
			continue;
		/*
		 * VT-d PI cannot post multicast/broadcast interrupts to a
		 * vCPU, so interrupt remapping is still used for this kind
		 * of interrupt.
		 *
		 * For lowest-priority interrupts, only those with a single
		 * CPU as the destination are supported, e.g. the user
		 * configures the interrupt via /proc/irq or irqbalance pins
		 * it to a single CPU.
		 *
		 * Full lowest-priority interrupt support may be added later.
		 */

		kvm_set_msi_irq(kvm, e, &irq);
		if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
			/*
			 * Make sure the IRTE is in remapped mode if
			 * we don't handle it in posted mode.
			 */
			ret = irq_set_vcpu_affinity(host_irq, NULL);
			if (ret < 0) {
				printk(KERN_INFO
				       "failed to fall back to remapped mode, irq: %u\n",
				       host_irq);
				goto out;
			}

			continue;
		}

		vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
		vcpu_info.vector = irq.vector;

		trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
				vcpu_info.vector, vcpu_info.pi_desc_addr, set);

		if (set)
			ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
		else
			ret = irq_set_vcpu_affinity(host_irq, NULL);

		if (ret < 0) {
			printk(KERN_INFO "%s: failed to update PI IRTE\n",
			       __func__);
			goto out;
		}
	}

	ret = 0;
out:
	srcu_read_unlock(&kvm->irq_srcu, idx);
	return ret;
}

static void vmx_setup_mce(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.mcg_cap & MCG_LMCE_P)
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
			FEATURE_CONTROL_LMCE;
	else
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
			~FEATURE_CONTROL_LMCE;
}

static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
{
	/* we need a nested vmexit to enter SMM, postpone if run is pending */
	if (to_vmx(vcpu)->nested.nested_run_pending)
		return 0;
	return 1;
}

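/*
 * SMM transitions with nested virtualization active: entering SMM from VMX
 * non-root (guest) mode first forces a nested VM-exit and temporarily drops
 * the vmxon state; leaving SMM restores vmxon and, if the vCPU was in guest
 * mode, re-enters non-root mode with HF_SMM_MASK briefly cleared so the
 * nested entry path sees a non-SMM context.
 */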
static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
	if (vmx->nested.smm.guest_mode)
		nested_vmx_vmexit(vcpu, -1, 0, 0);

	vmx->nested.smm.vmxon = vmx->nested.vmxon;
	vmx->nested.vmxon = false;
	vmx_clear_hlt(vcpu);
	return 0;
}

static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int ret;

	if (vmx->nested.smm.vmxon) {
		vmx->nested.vmxon = true;
		vmx->nested.smm.vmxon = false;
	}

	if (vmx->nested.smm.guest_mode) {
		vcpu->arch.hflags &= ~HF_SMM_MASK;
		ret = nested_vmx_enter_non_root_mode(vcpu, false);
		vcpu->arch.hflags |= HF_SMM_MASK;
		if (ret)
			return ret;

		vmx->nested.smm.guest_mode = false;
	}
	return 0;
}

static int enable_smi_window(struct kvm_vcpu *vcpu)
{
	return 0;
}

static __init int hardware_setup(void)
{
	unsigned long host_bndcfgs;
	int r, i;

	rdmsrl_safe(MSR_EFER, &host_efer);

	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
		kvm_define_shared_msr(i, vmx_msr_index[i]);

	if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
		return -EIO;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_MPX)) {
		rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
		WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
	}

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		rdmsrl(MSR_IA32_XSS, host_xss);

	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
		enable_vpid = 0;

	if (!cpu_has_vmx_ept() ||
	    !cpu_has_vmx_ept_4levels() ||
	    !cpu_has_vmx_ept_mt_wb() ||
	    !cpu_has_vmx_invept_global())
		enable_ept = 0;

	if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
		enable_ept_ad_bits = 0;

	if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
		enable_unrestricted_guest = 0;

	if (!cpu_has_vmx_flexpriority())
		flexpriority_enabled = 0;

	if (!cpu_has_virtual_nmis())
		enable_vnmi = 0;

	/*
	 * set_apic_access_page_addr() is used to reload the APIC access
	 * page upon invalidation.  No need to do anything if not using the
	 * APIC_ACCESS_ADDR VMCS field.
	 */
	if (!flexpriority_enabled)
		kvm_x86_ops->set_apic_access_page_addr = NULL;

	if (!cpu_has_vmx_tpr_shadow())
		kvm_x86_ops->update_cr8_intercept = NULL;

	if (enable_ept && !cpu_has_vmx_ept_2m_page())
		kvm_disable_largepages();

#if IS_ENABLED(CONFIG_HYPERV)
	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
	    && enable_ept) {
		kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
		kvm_x86_ops->tlb_remote_flush_with_range =
				hv_remote_flush_tlb_with_range;
	}
#endif

	if (!cpu_has_vmx_ple()) {
		ple_gap = 0;
		ple_window = 0;
		ple_window_grow = 0;
		ple_window_max = 0;
		ple_window_shrink = 0;
	}

	if (!cpu_has_vmx_apicv()) {
		enable_apicv = 0;
		kvm_x86_ops->sync_pir_to_irr = NULL;
	}

	if (cpu_has_vmx_tsc_scaling()) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 48;
	}

	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

	if (enable_ept)
		vmx_enable_tdp();
	else
		kvm_disable_tdp();

	/*
	 * Only enable PML when hardware supports PML feature, and both EPT
	 * and EPT A/D bit features are enabled -- PML depends on them to work.
	 */
	if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
		enable_pml = 0;

	if (!enable_pml) {
		kvm_x86_ops->slot_enable_log_dirty = NULL;
		kvm_x86_ops->slot_disable_log_dirty = NULL;
		kvm_x86_ops->flush_log_dirty = NULL;
		kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
	}

	if (!cpu_has_vmx_preemption_timer())
		kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;

	if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
		u64 vmx_msr;

		rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
		cpu_preemption_timer_multi =
			vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
	} else {
		kvm_x86_ops->set_hv_timer = NULL;
		kvm_x86_ops->cancel_hv_timer = NULL;
	}

	kvm_set_posted_intr_wakeup_handler(wakeup_handler);

	kvm_mce_cap_supported |= MCG_LMCE_P;

	if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
		return -EINVAL;
	if (!enable_ept || !cpu_has_vmx_intel_pt())
		pt_mode = PT_MODE_SYSTEM;

	if (nested) {
		nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
					   vmx_capability.ept, enable_apicv);

		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
		if (r)
			return r;
	}

	r = alloc_kvm_area();
	if (r)
		nested_vmx_hardware_unsetup();
	return r;
}

static __exit void hardware_unsetup(void)
{
	if (nested)
		nested_vmx_hardware_unsetup();

	free_kvm_area();
}

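/*
 * The VMX implementation of the kvm_x86_ops callback table.  A pointer to
 * this structure is handed to kvm_init() in vmx_init() below; entries the
 * hardware cannot support are overwritten with NULL (or a fallback) in
 * hardware_setup() above.
 */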
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.check_processor_compatibility = vmx_check_processor_compat,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,
	.cpu_has_accelerated_tpr = report_flexpriority,
	.has_emulated_msr = vmx_has_emulated_msr,

	.vm_init = vmx_vm_init,
	.vm_alloc = vmx_vm_alloc,
	.vm_free = vmx_vm_free,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,
	.vcpu_reset = vmx_vcpu_reset,

	.prepare_guest_switch = vmx_prepare_switch_to_guest,
	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,

	.update_bp_intercept = update_exception_bitmap,
	.get_msr_feature = vmx_get_msr_feature,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cpl = vmx_get_cpl,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
	.decache_cr3 = vmx_decache_cr3,
	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
	.set_efer = vmx_set_efer,
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.get_dr6 = vmx_get_dr6,
	.set_dr6 = vmx_set_dr6,
	.set_dr7 = vmx_set_dr7,
	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
	.cache_reg = vmx_cache_reg,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.tlb_flush = vmx_flush_tlb,
	.tlb_flush_gva = vmx_flush_tlb_gva,

	.run = vmx_vcpu_run,
	.handle_exit = vmx_handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = vmx_set_interrupt_shadow,
	.get_interrupt_shadow = vmx_get_interrupt_shadow,
	.patch_hypercall = vmx_patch_hypercall,
	.set_irq = vmx_inject_irq,
	.set_nmi = vmx_inject_nmi,
	.queue_exception = vmx_queue_exception,
	.cancel_injection = vmx_cancel_injection,
	.interrupt_allowed = vmx_interrupt_allowed,
	.nmi_allowed = vmx_nmi_allowed,
	.get_nmi_mask = vmx_get_nmi_mask,
	.set_nmi_mask = vmx_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
	.get_enable_apicv = vmx_get_enable_apicv,
	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
	.load_eoi_exitmap = vmx_load_eoi_exitmap,
	.apicv_post_state_restore = vmx_apicv_post_state_restore,
	.hwapic_irr_update = vmx_hwapic_irr_update,
	.hwapic_isr_update = vmx_hwapic_isr_update,
	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
	.sync_pir_to_irr = vmx_sync_pir_to_irr,
	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,

	.set_tss_addr = vmx_set_tss_addr,
	.set_identity_map_addr = vmx_set_identity_map_addr,
	.get_tdp_level = get_ept_level,
	.get_mt_mask = vmx_get_mt_mask,

	.get_exit_info = vmx_get_exit_info,

	.get_lpage_level = vmx_get_lpage_level,

	.cpuid_update = vmx_cpuid_update,

	.rdtscp_supported = vmx_rdtscp_supported,
	.invpcid_supported = vmx_invpcid_supported,

	.set_supported_cpuid = vmx_set_supported_cpuid,

	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,

	.read_l1_tsc_offset = vmx_read_l1_tsc_offset,
	.write_l1_tsc_offset = vmx_write_l1_tsc_offset,

	.set_tdp_cr3 = vmx_set_cr3,

	.check_intercept = vmx_check_intercept,
	.handle_external_intr = vmx_handle_external_intr,
	.mpx_supported = vmx_mpx_supported,
	.xsaves_supported = vmx_xsaves_supported,
	.umip_emulated = vmx_umip_emulated,
	.pt_supported = vmx_pt_supported,

	.request_immediate_exit = vmx_request_immediate_exit,

	.sched_in = vmx_sched_in,

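	/*
	 * Dirty-logging hooks backed by PML.  When PML cannot be used (no
	 * hardware support, or EPT/EPT A/D disabled), hardware_setup()
	 * clears the four slot/flush hooks below and the generic code falls
	 * back to its non-PML dirty tracking.
	 */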
	.slot_enable_log_dirty = vmx_slot_enable_log_dirty,
	.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
	.flush_log_dirty = vmx_flush_log_dirty,
	.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
	.write_log_dirty = vmx_write_pml_buffer,

	.pre_block = vmx_pre_block,
	.post_block = vmx_post_block,

	.pmu_ops = &intel_pmu_ops,

	.update_pi_irte = vmx_update_pi_irte,

#ifdef CONFIG_X86_64
	.set_hv_timer = vmx_set_hv_timer,
	.cancel_hv_timer = vmx_cancel_hv_timer,
#endif

	.setup_mce = vmx_setup_mce,

	.smi_allowed = vmx_smi_allowed,
	.pre_enter_smm = vmx_pre_enter_smm,
	.pre_leave_smm = vmx_pre_leave_smm,
	.enable_smi_window = enable_smi_window,

	.check_nested_events = NULL,
	.get_nested_state = NULL,
	.set_nested_state = NULL,
	.get_vmcs12_pages = NULL,
	.nested_enable_evmcs = NULL,
};

static void vmx_cleanup_l1d_flush(void)
{
	if (vmx_l1d_flush_pages) {
		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
		vmx_l1d_flush_pages = NULL;
	}
	/* Restore state so sysfs ignores VMX */
	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}

static void vmx_exit(void)
{
#ifdef CONFIG_KEXEC_CORE
	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
	synchronize_rcu();
#endif

	kvm_exit();

#if IS_ENABLED(CONFIG_HYPERV)
	if (static_branch_unlikely(&enable_evmcs)) {
		int cpu;
		struct hv_vp_assist_page *vp_ap;
		/*
		 * Reset everything to support using non-enlightened VMCS
		 * access later (e.g. when we reload the module with
		 * enlightened_vmcs=0).
		 */
		for_each_online_cpu(cpu) {
			vp_ap = hv_get_vp_assist_page(cpu);

			if (!vp_ap)
				continue;

			vp_ap->current_nested_vmcs = 0;
			vp_ap->enlighten_vmentry = 0;
		}

		static_branch_disable(&enable_evmcs);
	}
#endif
	vmx_cleanup_l1d_flush();
}
module_exit(vmx_exit);

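/*
 * Module init: when running on Hyper-V, enlightened VMCS is used only if the
 * hypervisor recommends it, advertises at least KVM_EVMCS_VERSION, and
 * provides a VP assist page on every online CPU; otherwise the driver falls
 * back to ordinary VMCS access.  kvm_init() then registers vmx_x86_ops.
 */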
static int __init vmx_init(void)
{
	int r;

#if IS_ENABLED(CONFIG_HYPERV)
	/*
	 * Enlightened VMCS usage should be recommended by the Hyper-V host,
	 * and the host needs to support eVMCS v1 or above.  eVMCS support
	 * can also be disabled via the module parameter.
	 */
	if (enlightened_vmcs &&
	    ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
	    (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
	    KVM_EVMCS_VERSION) {
		int cpu;

		/* Check that we have assist pages on all online CPUs */
		for_each_online_cpu(cpu) {
			if (!hv_get_vp_assist_page(cpu)) {
				enlightened_vmcs = false;
				break;
			}
		}

		if (enlightened_vmcs) {
			pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n");
			static_branch_enable(&enable_evmcs);
		}
	} else {
		enlightened_vmcs = false;
	}
#endif

	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
		     __alignof__(struct vcpu_vmx), THIS_MODULE);
	if (r)
		return r;

	/*
	 * Must be called after kvm_init() so enable_ept is properly set up.
	 * Pass in the mitigation mode value that was stored by the
	 * pre-module-init parameter parser.  If no parameter was given, it
	 * contains 'auto', which is turned into the default 'cond'
	 * mitigation mode.
	 */
	if (boot_cpu_has(X86_BUG_L1TF)) {
		r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
		if (r) {
			vmx_exit();
			return r;
		}
	}

#ifdef CONFIG_KEXEC_CORE
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   crash_vmclear_local_loaded_vmcss);
#endif
	vmx_check_vmcs12_offsets();

	return 0;
}
module_init(vmx_init);
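
/*
 * Illustrative usage note, not part of the driver: assuming the usual
 * kvm-intel module name, the module parameters checked above are read at
 * load time, e.g.
 *
 *	modprobe kvm-intel nested=0 enlightened_vmcs=0
 *
 * Reloading with enlightened_vmcs=0 relies on vmx_exit() having reset the
 * Hyper-V VP assist pages as done above.
 */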