1 // SPDX-License-Identifier: GPL-2.0 2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3 4 #include <linux/objtool.h> 5 #include <linux/percpu.h> 6 7 #include <asm/debugreg.h> 8 #include <asm/mmu_context.h> 9 10 #include "cpuid.h" 11 #include "hyperv.h" 12 #include "mmu.h" 13 #include "nested.h" 14 #include "pmu.h" 15 #include "posted_intr.h" 16 #include "sgx.h" 17 #include "trace.h" 18 #include "vmx.h" 19 #include "x86.h" 20 #include "smm.h" 21 22 static bool __read_mostly enable_shadow_vmcs = 1; 23 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); 24 25 static bool __read_mostly nested_early_check = 0; 26 module_param(nested_early_check, bool, S_IRUGO); 27 28 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK 29 30 /* 31 * Hyper-V requires all of these, so mark them as supported even though 32 * they are just treated the same as all-context. 33 */ 34 #define VMX_VPID_EXTENT_SUPPORTED_MASK \ 35 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \ 36 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \ 37 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \ 38 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT) 39 40 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 41 42 enum { 43 VMX_VMREAD_BITMAP, 44 VMX_VMWRITE_BITMAP, 45 VMX_BITMAP_NR 46 }; 47 static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; 48 49 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) 50 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) 51 52 struct shadow_vmcs_field { 53 u16 encoding; 54 u16 offset; 55 }; 56 static struct shadow_vmcs_field shadow_read_only_fields[] = { 57 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) }, 58 #include "vmcs_shadow_fields.h" 59 }; 60 static int max_shadow_read_only_fields = 61 ARRAY_SIZE(shadow_read_only_fields); 62 63 static struct shadow_vmcs_field shadow_read_write_fields[] = { 64 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) }, 65 #include "vmcs_shadow_fields.h" 66 }; 67 static int max_shadow_read_write_fields = 68 ARRAY_SIZE(shadow_read_write_fields); 69 70 static void init_vmcs_shadow_fields(void) 71 { 72 int i, j; 73 74 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); 75 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); 76 77 for (i = j = 0; i < max_shadow_read_only_fields; i++) { 78 struct shadow_vmcs_field entry = shadow_read_only_fields[i]; 79 u16 field = entry.encoding; 80 81 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && 82 (i + 1 == max_shadow_read_only_fields || 83 shadow_read_only_fields[i + 1].encoding != field + 1)) 84 pr_err("Missing field from shadow_read_only_field %x\n", 85 field + 1); 86 87 clear_bit(field, vmx_vmread_bitmap); 88 if (field & 1) 89 #ifdef CONFIG_X86_64 90 continue; 91 #else 92 entry.offset += sizeof(u32); 93 #endif 94 shadow_read_only_fields[j++] = entry; 95 } 96 max_shadow_read_only_fields = j; 97 98 for (i = j = 0; i < max_shadow_read_write_fields; i++) { 99 struct shadow_vmcs_field entry = shadow_read_write_fields[i]; 100 u16 field = entry.encoding; 101 102 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && 103 (i + 1 == max_shadow_read_write_fields || 104 shadow_read_write_fields[i + 1].encoding != field + 1)) 105 pr_err("Missing field from shadow_read_write_field %x\n", 106 field + 1); 107 108 WARN_ONCE(field >= GUEST_ES_AR_BYTES && 109 field <= GUEST_TR_AR_BYTES, 110 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES"); 111 112 /* 113 * PML and the preemption timer can be emulated, but the 114 * processor cannot vmwrite to fields that don't exist 115 * on bare metal. 116 */ 117 switch (field) { 118 case GUEST_PML_INDEX: 119 if (!cpu_has_vmx_pml()) 120 continue; 121 break; 122 case VMX_PREEMPTION_TIMER_VALUE: 123 if (!cpu_has_vmx_preemption_timer()) 124 continue; 125 break; 126 case GUEST_INTR_STATUS: 127 if (!cpu_has_vmx_apicv()) 128 continue; 129 break; 130 default: 131 break; 132 } 133 134 clear_bit(field, vmx_vmwrite_bitmap); 135 clear_bit(field, vmx_vmread_bitmap); 136 if (field & 1) 137 #ifdef CONFIG_X86_64 138 continue; 139 #else 140 entry.offset += sizeof(u32); 141 #endif 142 shadow_read_write_fields[j++] = entry; 143 } 144 max_shadow_read_write_fields = j; 145 } 146 147 /* 148 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), 149 * set the success or error code of an emulated VMX instruction (as specified 150 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated 151 * instruction. 152 */ 153 static int nested_vmx_succeed(struct kvm_vcpu *vcpu) 154 { 155 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) 156 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | 157 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); 158 return kvm_skip_emulated_instruction(vcpu); 159 } 160 161 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu) 162 { 163 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) 164 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | 165 X86_EFLAGS_SF | X86_EFLAGS_OF)) 166 | X86_EFLAGS_CF); 167 return kvm_skip_emulated_instruction(vcpu); 168 } 169 170 static int nested_vmx_failValid(struct kvm_vcpu *vcpu, 171 u32 vm_instruction_error) 172 { 173 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) 174 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | 175 X86_EFLAGS_SF | X86_EFLAGS_OF)) 176 | X86_EFLAGS_ZF); 177 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; 178 /* 179 * We don't need to force sync to shadow VMCS because 180 * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all 181 * fields and thus must be synced. 182 */ 183 if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID) 184 to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true; 185 186 return kvm_skip_emulated_instruction(vcpu); 187 } 188 189 static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error) 190 { 191 struct vcpu_vmx *vmx = to_vmx(vcpu); 192 193 /* 194 * failValid writes the error number to the current VMCS, which 195 * can't be done if there isn't a current VMCS. 196 */ 197 if (vmx->nested.current_vmptr == INVALID_GPA && 198 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 199 return nested_vmx_failInvalid(vcpu); 200 201 return nested_vmx_failValid(vcpu, vm_instruction_error); 202 } 203 204 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) 205 { 206 /* TODO: not to reset guest simply here. */ 207 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 208 pr_debug_ratelimited("nested vmx abort, indicator %d\n", indicator); 209 } 210 211 static inline bool vmx_control_verify(u32 control, u32 low, u32 high) 212 { 213 return fixed_bits_valid(control, low, high); 214 } 215 216 static inline u64 vmx_control_msr(u32 low, u32 high) 217 { 218 return low | ((u64)high << 32); 219 } 220 221 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) 222 { 223 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 224 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); 225 vmx->nested.need_vmcs12_to_shadow_sync = false; 226 } 227 228 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) 229 { 230 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 231 struct vcpu_vmx *vmx = to_vmx(vcpu); 232 233 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { 234 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true); 235 vmx->nested.hv_evmcs = NULL; 236 } 237 238 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; 239 240 if (hv_vcpu) { 241 hv_vcpu->nested.pa_page_gpa = INVALID_GPA; 242 hv_vcpu->nested.vm_id = 0; 243 hv_vcpu->nested.vp_id = 0; 244 } 245 } 246 247 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx, 248 struct loaded_vmcs *prev) 249 { 250 struct vmcs_host_state *dest, *src; 251 252 if (unlikely(!vmx->guest_state_loaded)) 253 return; 254 255 src = &prev->host_state; 256 dest = &vmx->loaded_vmcs->host_state; 257 258 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base); 259 dest->ldt_sel = src->ldt_sel; 260 #ifdef CONFIG_X86_64 261 dest->ds_sel = src->ds_sel; 262 dest->es_sel = src->es_sel; 263 #endif 264 } 265 266 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) 267 { 268 struct vcpu_vmx *vmx = to_vmx(vcpu); 269 struct loaded_vmcs *prev; 270 int cpu; 271 272 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs)) 273 return; 274 275 cpu = get_cpu(); 276 prev = vmx->loaded_vmcs; 277 vmx->loaded_vmcs = vmcs; 278 vmx_vcpu_load_vmcs(vcpu, cpu, prev); 279 vmx_sync_vmcs_host_state(vmx, prev); 280 put_cpu(); 281 282 vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET; 283 284 /* 285 * All lazily updated registers will be reloaded from VMCS12 on both 286 * vmentry and vmexit. 287 */ 288 vcpu->arch.regs_dirty = 0; 289 } 290 291 /* 292 * Free whatever needs to be freed from vmx->nested when L1 goes down, or 293 * just stops using VMX. 294 */ 295 static void free_nested(struct kvm_vcpu *vcpu) 296 { 297 struct vcpu_vmx *vmx = to_vmx(vcpu); 298 299 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01)) 300 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 301 302 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) 303 return; 304 305 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 306 307 vmx->nested.vmxon = false; 308 vmx->nested.smm.vmxon = false; 309 vmx->nested.vmxon_ptr = INVALID_GPA; 310 free_vpid(vmx->nested.vpid02); 311 vmx->nested.posted_intr_nv = -1; 312 vmx->nested.current_vmptr = INVALID_GPA; 313 if (enable_shadow_vmcs) { 314 vmx_disable_shadow_vmcs(vmx); 315 vmcs_clear(vmx->vmcs01.shadow_vmcs); 316 free_vmcs(vmx->vmcs01.shadow_vmcs); 317 vmx->vmcs01.shadow_vmcs = NULL; 318 } 319 kfree(vmx->nested.cached_vmcs12); 320 vmx->nested.cached_vmcs12 = NULL; 321 kfree(vmx->nested.cached_shadow_vmcs12); 322 vmx->nested.cached_shadow_vmcs12 = NULL; 323 /* 324 * Unpin physical memory we referred to in the vmcs02. The APIC access 325 * page's backing page (yeah, confusing) shouldn't actually be accessed, 326 * and if it is written, the contents are irrelevant. 327 */ 328 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false); 329 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); 330 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); 331 vmx->nested.pi_desc = NULL; 332 333 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 334 335 nested_release_evmcs(vcpu); 336 337 free_loaded_vmcs(&vmx->nested.vmcs02); 338 } 339 340 /* 341 * Ensure that the current vmcs of the logical processor is the 342 * vmcs01 of the vcpu before calling free_nested(). 343 */ 344 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu) 345 { 346 vcpu_load(vcpu); 347 vmx_leave_nested(vcpu); 348 vcpu_put(vcpu); 349 } 350 351 #define EPTP_PA_MASK GENMASK_ULL(51, 12) 352 353 static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp) 354 { 355 return VALID_PAGE(root_hpa) && 356 ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK)); 357 } 358 359 static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp, 360 gpa_t addr) 361 { 362 unsigned long roots = 0; 363 uint i; 364 struct kvm_mmu_root_info *cached_root; 365 366 WARN_ON_ONCE(!mmu_is_nested(vcpu)); 367 368 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 369 cached_root = &vcpu->arch.mmu->prev_roots[i]; 370 371 if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd, 372 eptp)) 373 roots |= KVM_MMU_ROOT_PREVIOUS(i); 374 } 375 if (roots) 376 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.mmu, addr, roots); 377 } 378 379 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, 380 struct x86_exception *fault) 381 { 382 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 383 struct vcpu_vmx *vmx = to_vmx(vcpu); 384 u32 vm_exit_reason; 385 unsigned long exit_qualification = vcpu->arch.exit_qualification; 386 387 if (vmx->nested.pml_full) { 388 vm_exit_reason = EXIT_REASON_PML_FULL; 389 vmx->nested.pml_full = false; 390 exit_qualification &= INTR_INFO_UNBLOCK_NMI; 391 } else { 392 if (fault->error_code & PFERR_RSVD_MASK) 393 vm_exit_reason = EXIT_REASON_EPT_MISCONFIG; 394 else 395 vm_exit_reason = EXIT_REASON_EPT_VIOLATION; 396 397 /* 398 * Although the caller (kvm_inject_emulated_page_fault) would 399 * have already synced the faulting address in the shadow EPT 400 * tables for the current EPTP12, we also need to sync it for 401 * any other cached EPTP02s based on the same EP4TA, since the 402 * TLB associates mappings to the EP4TA rather than the full EPTP. 403 */ 404 nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer, 405 fault->address); 406 } 407 408 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification); 409 vmcs12->guest_physical_address = fault->address; 410 } 411 412 static void nested_ept_new_eptp(struct kvm_vcpu *vcpu) 413 { 414 struct vcpu_vmx *vmx = to_vmx(vcpu); 415 bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT; 416 int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps); 417 418 kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level, 419 nested_ept_ad_enabled(vcpu), 420 nested_ept_get_eptp(vcpu)); 421 } 422 423 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) 424 { 425 WARN_ON(mmu_is_nested(vcpu)); 426 427 vcpu->arch.mmu = &vcpu->arch.guest_mmu; 428 nested_ept_new_eptp(vcpu); 429 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp; 430 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault; 431 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read; 432 433 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; 434 } 435 436 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) 437 { 438 vcpu->arch.mmu = &vcpu->arch.root_mmu; 439 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; 440 } 441 442 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, 443 u16 error_code) 444 { 445 bool inequality, bit; 446 447 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; 448 inequality = 449 (error_code & vmcs12->page_fault_error_code_mask) != 450 vmcs12->page_fault_error_code_match; 451 return inequality ^ bit; 452 } 453 454 static bool nested_vmx_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector, 455 u32 error_code) 456 { 457 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 458 459 /* 460 * Drop bits 31:16 of the error code when performing the #PF mask+match 461 * check. All VMCS fields involved are 32 bits, but Intel CPUs never 462 * set bits 31:16 and VMX disallows setting bits 31:16 in the injected 463 * error code. Including the to-be-dropped bits in the check might 464 * result in an "impossible" or missed exit from L1's perspective. 465 */ 466 if (vector == PF_VECTOR) 467 return nested_vmx_is_page_fault_vmexit(vmcs12, (u16)error_code); 468 469 return (vmcs12->exception_bitmap & (1u << vector)); 470 } 471 472 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu, 473 struct vmcs12 *vmcs12) 474 { 475 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 476 return 0; 477 478 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) || 479 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b))) 480 return -EINVAL; 481 482 return 0; 483 } 484 485 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, 486 struct vmcs12 *vmcs12) 487 { 488 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 489 return 0; 490 491 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap))) 492 return -EINVAL; 493 494 return 0; 495 } 496 497 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, 498 struct vmcs12 *vmcs12) 499 { 500 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) 501 return 0; 502 503 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))) 504 return -EINVAL; 505 506 return 0; 507 } 508 509 /* 510 * For x2APIC MSRs, ignore the vmcs01 bitmap. L1 can enable x2APIC without L1 511 * itself utilizing x2APIC. All MSRs were previously set to be intercepted, 512 * only the "disable intercept" case needs to be handled. 513 */ 514 static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1, 515 unsigned long *msr_bitmap_l0, 516 u32 msr, int type) 517 { 518 if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr)) 519 vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr); 520 521 if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr)) 522 vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr); 523 } 524 525 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) 526 { 527 int msr; 528 529 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 530 unsigned word = msr / BITS_PER_LONG; 531 532 msr_bitmap[word] = ~0; 533 msr_bitmap[word + (0x800 / sizeof(long))] = ~0; 534 } 535 } 536 537 #define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw) \ 538 static inline \ 539 void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx, \ 540 unsigned long *msr_bitmap_l1, \ 541 unsigned long *msr_bitmap_l0, u32 msr) \ 542 { \ 543 if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \ 544 vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr)) \ 545 vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr); \ 546 else \ 547 vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr); \ 548 } 549 BUILD_NVMX_MSR_INTERCEPT_HELPER(read) 550 BUILD_NVMX_MSR_INTERCEPT_HELPER(write) 551 552 static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx, 553 unsigned long *msr_bitmap_l1, 554 unsigned long *msr_bitmap_l0, 555 u32 msr, int types) 556 { 557 if (types & MSR_TYPE_R) 558 nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1, 559 msr_bitmap_l0, msr); 560 if (types & MSR_TYPE_W) 561 nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1, 562 msr_bitmap_l0, msr); 563 } 564 565 /* 566 * Merge L0's and L1's MSR bitmap, return false to indicate that 567 * we do not use the hardware. 568 */ 569 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, 570 struct vmcs12 *vmcs12) 571 { 572 struct vcpu_vmx *vmx = to_vmx(vcpu); 573 int msr; 574 unsigned long *msr_bitmap_l1; 575 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap; 576 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 577 struct kvm_host_map *map = &vmx->nested.msr_bitmap_map; 578 579 /* Nothing to do if the MSR bitmap is not in use. */ 580 if (!cpu_has_vmx_msr_bitmap() || 581 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 582 return false; 583 584 /* 585 * MSR bitmap update can be skipped when: 586 * - MSR bitmap for L1 hasn't changed. 587 * - Nested hypervisor (L1) is attempting to launch the same L2 as 588 * before. 589 * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature 590 * and tells KVM (L0) there were no changes in MSR bitmap for L2. 591 */ 592 if (!vmx->nested.force_msr_bitmap_recalc && evmcs && 593 evmcs->hv_enlightenments_control.msr_bitmap && 594 evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP) 595 return true; 596 597 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map)) 598 return false; 599 600 msr_bitmap_l1 = (unsigned long *)map->hva; 601 602 /* 603 * To keep the control flow simple, pay eight 8-byte writes (sixteen 604 * 4-byte writes on 32-bit systems) up front to enable intercepts for 605 * the x2APIC MSR range and selectively toggle those relevant to L2. 606 */ 607 enable_x2apic_msr_intercepts(msr_bitmap_l0); 608 609 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { 610 if (nested_cpu_has_apic_reg_virt(vmcs12)) { 611 /* 612 * L0 need not intercept reads for MSRs between 0x800 613 * and 0x8ff, it just lets the processor take the value 614 * from the virtual-APIC page; take those 256 bits 615 * directly from the L1 bitmap. 616 */ 617 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 618 unsigned word = msr / BITS_PER_LONG; 619 620 msr_bitmap_l0[word] = msr_bitmap_l1[word]; 621 } 622 } 623 624 nested_vmx_disable_intercept_for_x2apic_msr( 625 msr_bitmap_l1, msr_bitmap_l0, 626 X2APIC_MSR(APIC_TASKPRI), 627 MSR_TYPE_R | MSR_TYPE_W); 628 629 if (nested_cpu_has_vid(vmcs12)) { 630 nested_vmx_disable_intercept_for_x2apic_msr( 631 msr_bitmap_l1, msr_bitmap_l0, 632 X2APIC_MSR(APIC_EOI), 633 MSR_TYPE_W); 634 nested_vmx_disable_intercept_for_x2apic_msr( 635 msr_bitmap_l1, msr_bitmap_l0, 636 X2APIC_MSR(APIC_SELF_IPI), 637 MSR_TYPE_W); 638 } 639 } 640 641 /* 642 * Always check vmcs01's bitmap to honor userspace MSR filters and any 643 * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through. 644 */ 645 #ifdef CONFIG_X86_64 646 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, 647 MSR_FS_BASE, MSR_TYPE_RW); 648 649 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, 650 MSR_GS_BASE, MSR_TYPE_RW); 651 652 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, 653 MSR_KERNEL_GS_BASE, MSR_TYPE_RW); 654 #endif 655 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, 656 MSR_IA32_SPEC_CTRL, MSR_TYPE_RW); 657 658 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, 659 MSR_IA32_PRED_CMD, MSR_TYPE_W); 660 661 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, 662 MSR_IA32_FLUSH_CMD, MSR_TYPE_W); 663 664 kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false); 665 666 vmx->nested.force_msr_bitmap_recalc = false; 667 668 return true; 669 } 670 671 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu, 672 struct vmcs12 *vmcs12) 673 { 674 struct vcpu_vmx *vmx = to_vmx(vcpu); 675 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; 676 677 if (!nested_cpu_has_shadow_vmcs(vmcs12) || 678 vmcs12->vmcs_link_pointer == INVALID_GPA) 679 return; 680 681 if (ghc->gpa != vmcs12->vmcs_link_pointer && 682 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, 683 vmcs12->vmcs_link_pointer, VMCS12_SIZE)) 684 return; 685 686 kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), 687 VMCS12_SIZE); 688 } 689 690 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu, 691 struct vmcs12 *vmcs12) 692 { 693 struct vcpu_vmx *vmx = to_vmx(vcpu); 694 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; 695 696 if (!nested_cpu_has_shadow_vmcs(vmcs12) || 697 vmcs12->vmcs_link_pointer == INVALID_GPA) 698 return; 699 700 if (ghc->gpa != vmcs12->vmcs_link_pointer && 701 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, 702 vmcs12->vmcs_link_pointer, VMCS12_SIZE)) 703 return; 704 705 kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), 706 VMCS12_SIZE); 707 } 708 709 /* 710 * In nested virtualization, check if L1 has set 711 * VM_EXIT_ACK_INTR_ON_EXIT 712 */ 713 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) 714 { 715 return get_vmcs12(vcpu)->vm_exit_controls & 716 VM_EXIT_ACK_INTR_ON_EXIT; 717 } 718 719 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, 720 struct vmcs12 *vmcs12) 721 { 722 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && 723 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr))) 724 return -EINVAL; 725 else 726 return 0; 727 } 728 729 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, 730 struct vmcs12 *vmcs12) 731 { 732 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && 733 !nested_cpu_has_apic_reg_virt(vmcs12) && 734 !nested_cpu_has_vid(vmcs12) && 735 !nested_cpu_has_posted_intr(vmcs12)) 736 return 0; 737 738 /* 739 * If virtualize x2apic mode is enabled, 740 * virtualize apic access must be disabled. 741 */ 742 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) && 743 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))) 744 return -EINVAL; 745 746 /* 747 * If virtual interrupt delivery is enabled, 748 * we must exit on external interrupts. 749 */ 750 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu))) 751 return -EINVAL; 752 753 /* 754 * bits 15:8 should be zero in posted_intr_nv, 755 * the descriptor address has been already checked 756 * in nested_get_vmcs12_pages. 757 * 758 * bits 5:0 of posted_intr_desc_addr should be zero. 759 */ 760 if (nested_cpu_has_posted_intr(vmcs12) && 761 (CC(!nested_cpu_has_vid(vmcs12)) || 762 CC(!nested_exit_intr_ack_set(vcpu)) || 763 CC((vmcs12->posted_intr_nv & 0xff00)) || 764 CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64)))) 765 return -EINVAL; 766 767 /* tpr shadow is needed by all apicv features. */ 768 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))) 769 return -EINVAL; 770 771 return 0; 772 } 773 774 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, 775 u32 count, u64 addr) 776 { 777 if (count == 0) 778 return 0; 779 780 if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) || 781 !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1))) 782 return -EINVAL; 783 784 return 0; 785 } 786 787 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu, 788 struct vmcs12 *vmcs12) 789 { 790 if (CC(nested_vmx_check_msr_switch(vcpu, 791 vmcs12->vm_exit_msr_load_count, 792 vmcs12->vm_exit_msr_load_addr)) || 793 CC(nested_vmx_check_msr_switch(vcpu, 794 vmcs12->vm_exit_msr_store_count, 795 vmcs12->vm_exit_msr_store_addr))) 796 return -EINVAL; 797 798 return 0; 799 } 800 801 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu, 802 struct vmcs12 *vmcs12) 803 { 804 if (CC(nested_vmx_check_msr_switch(vcpu, 805 vmcs12->vm_entry_msr_load_count, 806 vmcs12->vm_entry_msr_load_addr))) 807 return -EINVAL; 808 809 return 0; 810 } 811 812 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, 813 struct vmcs12 *vmcs12) 814 { 815 if (!nested_cpu_has_pml(vmcs12)) 816 return 0; 817 818 if (CC(!nested_cpu_has_ept(vmcs12)) || 819 CC(!page_address_valid(vcpu, vmcs12->pml_address))) 820 return -EINVAL; 821 822 return 0; 823 } 824 825 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu, 826 struct vmcs12 *vmcs12) 827 { 828 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) && 829 !nested_cpu_has_ept(vmcs12))) 830 return -EINVAL; 831 return 0; 832 } 833 834 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu, 835 struct vmcs12 *vmcs12) 836 { 837 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) && 838 !nested_cpu_has_ept(vmcs12))) 839 return -EINVAL; 840 return 0; 841 } 842 843 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu, 844 struct vmcs12 *vmcs12) 845 { 846 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 847 return 0; 848 849 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) || 850 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap))) 851 return -EINVAL; 852 853 return 0; 854 } 855 856 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, 857 struct vmx_msr_entry *e) 858 { 859 /* x2APIC MSR accesses are not allowed */ 860 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)) 861 return -EINVAL; 862 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */ 863 CC(e->index == MSR_IA32_UCODE_REV)) 864 return -EINVAL; 865 if (CC(e->reserved != 0)) 866 return -EINVAL; 867 return 0; 868 } 869 870 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, 871 struct vmx_msr_entry *e) 872 { 873 if (CC(e->index == MSR_FS_BASE) || 874 CC(e->index == MSR_GS_BASE) || 875 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */ 876 nested_vmx_msr_check_common(vcpu, e)) 877 return -EINVAL; 878 return 0; 879 } 880 881 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, 882 struct vmx_msr_entry *e) 883 { 884 if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */ 885 nested_vmx_msr_check_common(vcpu, e)) 886 return -EINVAL; 887 return 0; 888 } 889 890 static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu) 891 { 892 struct vcpu_vmx *vmx = to_vmx(vcpu); 893 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, 894 vmx->nested.msrs.misc_high); 895 896 return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER; 897 } 898 899 /* 900 * Load guest's/host's msr at nested entry/exit. 901 * return 0 for success, entry index for failure. 902 * 903 * One of the failure modes for MSR load/store is when a list exceeds the 904 * virtual hardware's capacity. To maintain compatibility with hardware inasmuch 905 * as possible, process all valid entries before failing rather than precheck 906 * for a capacity violation. 907 */ 908 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) 909 { 910 u32 i; 911 struct vmx_msr_entry e; 912 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu); 913 914 for (i = 0; i < count; i++) { 915 if (unlikely(i >= max_msr_list_size)) 916 goto fail; 917 918 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), 919 &e, sizeof(e))) { 920 pr_debug_ratelimited( 921 "%s cannot read MSR entry (%u, 0x%08llx)\n", 922 __func__, i, gpa + i * sizeof(e)); 923 goto fail; 924 } 925 if (nested_vmx_load_msr_check(vcpu, &e)) { 926 pr_debug_ratelimited( 927 "%s check failed (%u, 0x%x, 0x%x)\n", 928 __func__, i, e.index, e.reserved); 929 goto fail; 930 } 931 if (kvm_set_msr(vcpu, e.index, e.value)) { 932 pr_debug_ratelimited( 933 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", 934 __func__, i, e.index, e.value); 935 goto fail; 936 } 937 } 938 return 0; 939 fail: 940 /* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */ 941 return i + 1; 942 } 943 944 static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu, 945 u32 msr_index, 946 u64 *data) 947 { 948 struct vcpu_vmx *vmx = to_vmx(vcpu); 949 950 /* 951 * If the L0 hypervisor stored a more accurate value for the TSC that 952 * does not include the time taken for emulation of the L2->L1 953 * VM-exit in L0, use the more accurate value. 954 */ 955 if (msr_index == MSR_IA32_TSC) { 956 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest, 957 MSR_IA32_TSC); 958 959 if (i >= 0) { 960 u64 val = vmx->msr_autostore.guest.val[i].value; 961 962 *data = kvm_read_l1_tsc(vcpu, val); 963 return true; 964 } 965 } 966 967 if (kvm_get_msr(vcpu, msr_index, data)) { 968 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__, 969 msr_index); 970 return false; 971 } 972 return true; 973 } 974 975 static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i, 976 struct vmx_msr_entry *e) 977 { 978 if (kvm_vcpu_read_guest(vcpu, 979 gpa + i * sizeof(*e), 980 e, 2 * sizeof(u32))) { 981 pr_debug_ratelimited( 982 "%s cannot read MSR entry (%u, 0x%08llx)\n", 983 __func__, i, gpa + i * sizeof(*e)); 984 return false; 985 } 986 if (nested_vmx_store_msr_check(vcpu, e)) { 987 pr_debug_ratelimited( 988 "%s check failed (%u, 0x%x, 0x%x)\n", 989 __func__, i, e->index, e->reserved); 990 return false; 991 } 992 return true; 993 } 994 995 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) 996 { 997 u64 data; 998 u32 i; 999 struct vmx_msr_entry e; 1000 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu); 1001 1002 for (i = 0; i < count; i++) { 1003 if (unlikely(i >= max_msr_list_size)) 1004 return -EINVAL; 1005 1006 if (!read_and_check_msr_entry(vcpu, gpa, i, &e)) 1007 return -EINVAL; 1008 1009 if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data)) 1010 return -EINVAL; 1011 1012 if (kvm_vcpu_write_guest(vcpu, 1013 gpa + i * sizeof(e) + 1014 offsetof(struct vmx_msr_entry, value), 1015 &data, sizeof(data))) { 1016 pr_debug_ratelimited( 1017 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", 1018 __func__, i, e.index, data); 1019 return -EINVAL; 1020 } 1021 } 1022 return 0; 1023 } 1024 1025 static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index) 1026 { 1027 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1028 u32 count = vmcs12->vm_exit_msr_store_count; 1029 u64 gpa = vmcs12->vm_exit_msr_store_addr; 1030 struct vmx_msr_entry e; 1031 u32 i; 1032 1033 for (i = 0; i < count; i++) { 1034 if (!read_and_check_msr_entry(vcpu, gpa, i, &e)) 1035 return false; 1036 1037 if (e.index == msr_index) 1038 return true; 1039 } 1040 return false; 1041 } 1042 1043 static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu, 1044 u32 msr_index) 1045 { 1046 struct vcpu_vmx *vmx = to_vmx(vcpu); 1047 struct vmx_msrs *autostore = &vmx->msr_autostore.guest; 1048 bool in_vmcs12_store_list; 1049 int msr_autostore_slot; 1050 bool in_autostore_list; 1051 int last; 1052 1053 msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index); 1054 in_autostore_list = msr_autostore_slot >= 0; 1055 in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index); 1056 1057 if (in_vmcs12_store_list && !in_autostore_list) { 1058 if (autostore->nr == MAX_NR_LOADSTORE_MSRS) { 1059 /* 1060 * Emulated VMEntry does not fail here. Instead a less 1061 * accurate value will be returned by 1062 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr() 1063 * instead of reading the value from the vmcs02 VMExit 1064 * MSR-store area. 1065 */ 1066 pr_warn_ratelimited( 1067 "Not enough msr entries in msr_autostore. Can't add msr %x\n", 1068 msr_index); 1069 return; 1070 } 1071 last = autostore->nr++; 1072 autostore->val[last].index = msr_index; 1073 } else if (!in_vmcs12_store_list && in_autostore_list) { 1074 last = --autostore->nr; 1075 autostore->val[msr_autostore_slot] = autostore->val[last]; 1076 } 1077 } 1078 1079 /* 1080 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are 1081 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected 1082 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to 1083 * @entry_failure_code. 1084 */ 1085 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, 1086 bool nested_ept, bool reload_pdptrs, 1087 enum vm_entry_failure_code *entry_failure_code) 1088 { 1089 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) { 1090 *entry_failure_code = ENTRY_FAIL_DEFAULT; 1091 return -EINVAL; 1092 } 1093 1094 /* 1095 * If PAE paging and EPT are both on, CR3 is not used by the CPU and 1096 * must not be dereferenced. 1097 */ 1098 if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) && 1099 CC(!load_pdptrs(vcpu, cr3))) { 1100 *entry_failure_code = ENTRY_FAIL_PDPTE; 1101 return -EINVAL; 1102 } 1103 1104 vcpu->arch.cr3 = cr3; 1105 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 1106 1107 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */ 1108 kvm_init_mmu(vcpu); 1109 1110 if (!nested_ept) 1111 kvm_mmu_new_pgd(vcpu, cr3); 1112 1113 return 0; 1114 } 1115 1116 /* 1117 * Returns if KVM is able to config CPU to tag TLB entries 1118 * populated by L2 differently than TLB entries populated 1119 * by L1. 1120 * 1121 * If L0 uses EPT, L1 and L2 run with different EPTP because 1122 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries 1123 * are tagged with different EPTP. 1124 * 1125 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged 1126 * with different VPID (L1 entries are tagged with vmx->vpid 1127 * while L2 entries are tagged with vmx->nested.vpid02). 1128 */ 1129 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu) 1130 { 1131 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1132 1133 return enable_ept || 1134 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); 1135 } 1136 1137 static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu, 1138 struct vmcs12 *vmcs12, 1139 bool is_vmenter) 1140 { 1141 struct vcpu_vmx *vmx = to_vmx(vcpu); 1142 1143 /* 1144 * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or 1145 * L2's VP_ID upon request from the guest. Make sure we check for 1146 * pending entries in the right FIFO upon L1/L2 transition as these 1147 * requests are put by other vCPUs asynchronously. 1148 */ 1149 if (to_hv_vcpu(vcpu) && enable_ept) 1150 kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu); 1151 1152 /* 1153 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings 1154 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a 1155 * full TLB flush from the guest's perspective. This is required even 1156 * if VPID is disabled in the host as KVM may need to synchronize the 1157 * MMU in response to the guest TLB flush. 1158 * 1159 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use. 1160 * EPT is a special snowflake, as guest-physical mappings aren't 1161 * flushed on VPID invalidations, including VM-Enter or VM-Exit with 1162 * VPID disabled. As a result, KVM _never_ needs to sync nEPT 1163 * entries on VM-Enter because L1 can't rely on VM-Enter to flush 1164 * those mappings. 1165 */ 1166 if (!nested_cpu_has_vpid(vmcs12)) { 1167 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 1168 return; 1169 } 1170 1171 /* L2 should never have a VPID if VPID is disabled. */ 1172 WARN_ON(!enable_vpid); 1173 1174 /* 1175 * VPID is enabled and in use by vmcs12. If vpid12 is changing, then 1176 * emulate a guest TLB flush as KVM does not track vpid12 history nor 1177 * is the VPID incorporated into the MMU context. I.e. KVM must assume 1178 * that the new vpid12 has never been used and thus represents a new 1179 * guest ASID that cannot have entries in the TLB. 1180 */ 1181 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) { 1182 vmx->nested.last_vpid = vmcs12->virtual_processor_id; 1183 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 1184 return; 1185 } 1186 1187 /* 1188 * If VPID is enabled, used by vmc12, and vpid12 is not changing but 1189 * does not have a unique TLB tag (ASID), i.e. EPT is disabled and 1190 * KVM was unable to allocate a VPID for L2, flush the current context 1191 * as the effective ASID is common to both L1 and L2. 1192 */ 1193 if (!nested_has_guest_tlb_tag(vcpu)) 1194 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 1195 } 1196 1197 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) 1198 { 1199 superset &= mask; 1200 subset &= mask; 1201 1202 return (superset | subset) == superset; 1203 } 1204 1205 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) 1206 { 1207 const u64 feature_and_reserved = 1208 /* feature (except bit 48; see below) */ 1209 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | 1210 /* reserved */ 1211 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); 1212 u64 vmx_basic = vmcs_config.nested.basic; 1213 1214 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) 1215 return -EINVAL; 1216 1217 /* 1218 * KVM does not emulate a version of VMX that constrains physical 1219 * addresses of VMX structures (e.g. VMCS) to 32-bits. 1220 */ 1221 if (data & BIT_ULL(48)) 1222 return -EINVAL; 1223 1224 if (vmx_basic_vmcs_revision_id(vmx_basic) != 1225 vmx_basic_vmcs_revision_id(data)) 1226 return -EINVAL; 1227 1228 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data)) 1229 return -EINVAL; 1230 1231 vmx->nested.msrs.basic = data; 1232 return 0; 1233 } 1234 1235 static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index, 1236 u32 **low, u32 **high) 1237 { 1238 switch (msr_index) { 1239 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1240 *low = &msrs->pinbased_ctls_low; 1241 *high = &msrs->pinbased_ctls_high; 1242 break; 1243 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1244 *low = &msrs->procbased_ctls_low; 1245 *high = &msrs->procbased_ctls_high; 1246 break; 1247 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1248 *low = &msrs->exit_ctls_low; 1249 *high = &msrs->exit_ctls_high; 1250 break; 1251 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1252 *low = &msrs->entry_ctls_low; 1253 *high = &msrs->entry_ctls_high; 1254 break; 1255 case MSR_IA32_VMX_PROCBASED_CTLS2: 1256 *low = &msrs->secondary_ctls_low; 1257 *high = &msrs->secondary_ctls_high; 1258 break; 1259 default: 1260 BUG(); 1261 } 1262 } 1263 1264 static int 1265 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) 1266 { 1267 u32 *lowp, *highp; 1268 u64 supported; 1269 1270 vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp); 1271 1272 supported = vmx_control_msr(*lowp, *highp); 1273 1274 /* Check must-be-1 bits are still 1. */ 1275 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) 1276 return -EINVAL; 1277 1278 /* Check must-be-0 bits are still 0. */ 1279 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32))) 1280 return -EINVAL; 1281 1282 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp); 1283 *lowp = data; 1284 *highp = data >> 32; 1285 return 0; 1286 } 1287 1288 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) 1289 { 1290 const u64 feature_and_reserved_bits = 1291 /* feature */ 1292 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) | 1293 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | 1294 /* reserved */ 1295 GENMASK_ULL(13, 9) | BIT_ULL(31); 1296 u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low, 1297 vmcs_config.nested.misc_high); 1298 1299 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) 1300 return -EINVAL; 1301 1302 if ((vmx->nested.msrs.pinbased_ctls_high & 1303 PIN_BASED_VMX_PREEMPTION_TIMER) && 1304 vmx_misc_preemption_timer_rate(data) != 1305 vmx_misc_preemption_timer_rate(vmx_misc)) 1306 return -EINVAL; 1307 1308 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc)) 1309 return -EINVAL; 1310 1311 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc)) 1312 return -EINVAL; 1313 1314 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc)) 1315 return -EINVAL; 1316 1317 vmx->nested.msrs.misc_low = data; 1318 vmx->nested.msrs.misc_high = data >> 32; 1319 1320 return 0; 1321 } 1322 1323 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) 1324 { 1325 u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps, 1326 vmcs_config.nested.vpid_caps); 1327 1328 /* Every bit is either reserved or a feature bit. */ 1329 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) 1330 return -EINVAL; 1331 1332 vmx->nested.msrs.ept_caps = data; 1333 vmx->nested.msrs.vpid_caps = data >> 32; 1334 return 0; 1335 } 1336 1337 static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index) 1338 { 1339 switch (msr_index) { 1340 case MSR_IA32_VMX_CR0_FIXED0: 1341 return &msrs->cr0_fixed0; 1342 case MSR_IA32_VMX_CR4_FIXED0: 1343 return &msrs->cr4_fixed0; 1344 default: 1345 BUG(); 1346 } 1347 } 1348 1349 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) 1350 { 1351 const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index); 1352 1353 /* 1354 * 1 bits (which indicates bits which "must-be-1" during VMX operation) 1355 * must be 1 in the restored value. 1356 */ 1357 if (!is_bitwise_subset(data, *msr, -1ULL)) 1358 return -EINVAL; 1359 1360 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data; 1361 return 0; 1362 } 1363 1364 /* 1365 * Called when userspace is restoring VMX MSRs. 1366 * 1367 * Returns 0 on success, non-0 otherwise. 1368 */ 1369 int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) 1370 { 1371 struct vcpu_vmx *vmx = to_vmx(vcpu); 1372 1373 /* 1374 * Don't allow changes to the VMX capability MSRs while the vCPU 1375 * is in VMX operation. 1376 */ 1377 if (vmx->nested.vmxon) 1378 return -EBUSY; 1379 1380 switch (msr_index) { 1381 case MSR_IA32_VMX_BASIC: 1382 return vmx_restore_vmx_basic(vmx, data); 1383 case MSR_IA32_VMX_PINBASED_CTLS: 1384 case MSR_IA32_VMX_PROCBASED_CTLS: 1385 case MSR_IA32_VMX_EXIT_CTLS: 1386 case MSR_IA32_VMX_ENTRY_CTLS: 1387 /* 1388 * The "non-true" VMX capability MSRs are generated from the 1389 * "true" MSRs, so we do not support restoring them directly. 1390 * 1391 * If userspace wants to emulate VMX_BASIC[55]=0, userspace 1392 * should restore the "true" MSRs with the must-be-1 bits 1393 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND 1394 * DEFAULT SETTINGS". 1395 */ 1396 return -EINVAL; 1397 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1398 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1399 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1400 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1401 case MSR_IA32_VMX_PROCBASED_CTLS2: 1402 return vmx_restore_control_msr(vmx, msr_index, data); 1403 case MSR_IA32_VMX_MISC: 1404 return vmx_restore_vmx_misc(vmx, data); 1405 case MSR_IA32_VMX_CR0_FIXED0: 1406 case MSR_IA32_VMX_CR4_FIXED0: 1407 return vmx_restore_fixed0_msr(vmx, msr_index, data); 1408 case MSR_IA32_VMX_CR0_FIXED1: 1409 case MSR_IA32_VMX_CR4_FIXED1: 1410 /* 1411 * These MSRs are generated based on the vCPU's CPUID, so we 1412 * do not support restoring them directly. 1413 */ 1414 return -EINVAL; 1415 case MSR_IA32_VMX_EPT_VPID_CAP: 1416 return vmx_restore_vmx_ept_vpid_cap(vmx, data); 1417 case MSR_IA32_VMX_VMCS_ENUM: 1418 vmx->nested.msrs.vmcs_enum = data; 1419 return 0; 1420 case MSR_IA32_VMX_VMFUNC: 1421 if (data & ~vmcs_config.nested.vmfunc_controls) 1422 return -EINVAL; 1423 vmx->nested.msrs.vmfunc_controls = data; 1424 return 0; 1425 default: 1426 /* 1427 * The rest of the VMX capability MSRs do not support restore. 1428 */ 1429 return -EINVAL; 1430 } 1431 } 1432 1433 /* Returns 0 on success, non-0 otherwise. */ 1434 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata) 1435 { 1436 switch (msr_index) { 1437 case MSR_IA32_VMX_BASIC: 1438 *pdata = msrs->basic; 1439 break; 1440 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1441 case MSR_IA32_VMX_PINBASED_CTLS: 1442 *pdata = vmx_control_msr( 1443 msrs->pinbased_ctls_low, 1444 msrs->pinbased_ctls_high); 1445 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS) 1446 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 1447 break; 1448 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1449 case MSR_IA32_VMX_PROCBASED_CTLS: 1450 *pdata = vmx_control_msr( 1451 msrs->procbased_ctls_low, 1452 msrs->procbased_ctls_high); 1453 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS) 1454 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 1455 break; 1456 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1457 case MSR_IA32_VMX_EXIT_CTLS: 1458 *pdata = vmx_control_msr( 1459 msrs->exit_ctls_low, 1460 msrs->exit_ctls_high); 1461 if (msr_index == MSR_IA32_VMX_EXIT_CTLS) 1462 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 1463 break; 1464 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1465 case MSR_IA32_VMX_ENTRY_CTLS: 1466 *pdata = vmx_control_msr( 1467 msrs->entry_ctls_low, 1468 msrs->entry_ctls_high); 1469 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS) 1470 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 1471 break; 1472 case MSR_IA32_VMX_MISC: 1473 *pdata = vmx_control_msr( 1474 msrs->misc_low, 1475 msrs->misc_high); 1476 break; 1477 case MSR_IA32_VMX_CR0_FIXED0: 1478 *pdata = msrs->cr0_fixed0; 1479 break; 1480 case MSR_IA32_VMX_CR0_FIXED1: 1481 *pdata = msrs->cr0_fixed1; 1482 break; 1483 case MSR_IA32_VMX_CR4_FIXED0: 1484 *pdata = msrs->cr4_fixed0; 1485 break; 1486 case MSR_IA32_VMX_CR4_FIXED1: 1487 *pdata = msrs->cr4_fixed1; 1488 break; 1489 case MSR_IA32_VMX_VMCS_ENUM: 1490 *pdata = msrs->vmcs_enum; 1491 break; 1492 case MSR_IA32_VMX_PROCBASED_CTLS2: 1493 *pdata = vmx_control_msr( 1494 msrs->secondary_ctls_low, 1495 msrs->secondary_ctls_high); 1496 break; 1497 case MSR_IA32_VMX_EPT_VPID_CAP: 1498 *pdata = msrs->ept_caps | 1499 ((u64)msrs->vpid_caps << 32); 1500 break; 1501 case MSR_IA32_VMX_VMFUNC: 1502 *pdata = msrs->vmfunc_controls; 1503 break; 1504 default: 1505 return 1; 1506 } 1507 1508 return 0; 1509 } 1510 1511 /* 1512 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have 1513 * been modified by the L1 guest. Note, "writable" in this context means 1514 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of 1515 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only" 1516 * VM-exit information fields (which are actually writable if the vCPU is 1517 * configured to support "VMWRITE to any supported field in the VMCS"). 1518 */ 1519 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) 1520 { 1521 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 1522 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); 1523 struct shadow_vmcs_field field; 1524 unsigned long val; 1525 int i; 1526 1527 if (WARN_ON(!shadow_vmcs)) 1528 return; 1529 1530 preempt_disable(); 1531 1532 vmcs_load(shadow_vmcs); 1533 1534 for (i = 0; i < max_shadow_read_write_fields; i++) { 1535 field = shadow_read_write_fields[i]; 1536 val = __vmcs_readl(field.encoding); 1537 vmcs12_write_any(vmcs12, field.encoding, field.offset, val); 1538 } 1539 1540 vmcs_clear(shadow_vmcs); 1541 vmcs_load(vmx->loaded_vmcs->vmcs); 1542 1543 preempt_enable(); 1544 } 1545 1546 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) 1547 { 1548 const struct shadow_vmcs_field *fields[] = { 1549 shadow_read_write_fields, 1550 shadow_read_only_fields 1551 }; 1552 const int max_fields[] = { 1553 max_shadow_read_write_fields, 1554 max_shadow_read_only_fields 1555 }; 1556 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 1557 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); 1558 struct shadow_vmcs_field field; 1559 unsigned long val; 1560 int i, q; 1561 1562 if (WARN_ON(!shadow_vmcs)) 1563 return; 1564 1565 vmcs_load(shadow_vmcs); 1566 1567 for (q = 0; q < ARRAY_SIZE(fields); q++) { 1568 for (i = 0; i < max_fields[q]; i++) { 1569 field = fields[q][i]; 1570 val = vmcs12_read_any(vmcs12, field.encoding, 1571 field.offset); 1572 __vmcs_writel(field.encoding, val); 1573 } 1574 } 1575 1576 vmcs_clear(shadow_vmcs); 1577 vmcs_load(vmx->loaded_vmcs->vmcs); 1578 } 1579 1580 static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields) 1581 { 1582 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1583 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 1584 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu); 1585 1586 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ 1587 vmcs12->tpr_threshold = evmcs->tpr_threshold; 1588 vmcs12->guest_rip = evmcs->guest_rip; 1589 1590 if (unlikely(!(hv_clean_fields & 1591 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL))) { 1592 hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page; 1593 hv_vcpu->nested.vm_id = evmcs->hv_vm_id; 1594 hv_vcpu->nested.vp_id = evmcs->hv_vp_id; 1595 } 1596 1597 if (unlikely(!(hv_clean_fields & 1598 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) { 1599 vmcs12->guest_rsp = evmcs->guest_rsp; 1600 vmcs12->guest_rflags = evmcs->guest_rflags; 1601 vmcs12->guest_interruptibility_info = 1602 evmcs->guest_interruptibility_info; 1603 /* 1604 * Not present in struct vmcs12: 1605 * vmcs12->guest_ssp = evmcs->guest_ssp; 1606 */ 1607 } 1608 1609 if (unlikely(!(hv_clean_fields & 1610 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) { 1611 vmcs12->cpu_based_vm_exec_control = 1612 evmcs->cpu_based_vm_exec_control; 1613 } 1614 1615 if (unlikely(!(hv_clean_fields & 1616 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) { 1617 vmcs12->exception_bitmap = evmcs->exception_bitmap; 1618 } 1619 1620 if (unlikely(!(hv_clean_fields & 1621 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) { 1622 vmcs12->vm_entry_controls = evmcs->vm_entry_controls; 1623 } 1624 1625 if (unlikely(!(hv_clean_fields & 1626 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) { 1627 vmcs12->vm_entry_intr_info_field = 1628 evmcs->vm_entry_intr_info_field; 1629 vmcs12->vm_entry_exception_error_code = 1630 evmcs->vm_entry_exception_error_code; 1631 vmcs12->vm_entry_instruction_len = 1632 evmcs->vm_entry_instruction_len; 1633 } 1634 1635 if (unlikely(!(hv_clean_fields & 1636 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) { 1637 vmcs12->host_ia32_pat = evmcs->host_ia32_pat; 1638 vmcs12->host_ia32_efer = evmcs->host_ia32_efer; 1639 vmcs12->host_cr0 = evmcs->host_cr0; 1640 vmcs12->host_cr3 = evmcs->host_cr3; 1641 vmcs12->host_cr4 = evmcs->host_cr4; 1642 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp; 1643 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip; 1644 vmcs12->host_rip = evmcs->host_rip; 1645 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs; 1646 vmcs12->host_es_selector = evmcs->host_es_selector; 1647 vmcs12->host_cs_selector = evmcs->host_cs_selector; 1648 vmcs12->host_ss_selector = evmcs->host_ss_selector; 1649 vmcs12->host_ds_selector = evmcs->host_ds_selector; 1650 vmcs12->host_fs_selector = evmcs->host_fs_selector; 1651 vmcs12->host_gs_selector = evmcs->host_gs_selector; 1652 vmcs12->host_tr_selector = evmcs->host_tr_selector; 1653 vmcs12->host_ia32_perf_global_ctrl = evmcs->host_ia32_perf_global_ctrl; 1654 /* 1655 * Not present in struct vmcs12: 1656 * vmcs12->host_ia32_s_cet = evmcs->host_ia32_s_cet; 1657 * vmcs12->host_ssp = evmcs->host_ssp; 1658 * vmcs12->host_ia32_int_ssp_table_addr = evmcs->host_ia32_int_ssp_table_addr; 1659 */ 1660 } 1661 1662 if (unlikely(!(hv_clean_fields & 1663 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) { 1664 vmcs12->pin_based_vm_exec_control = 1665 evmcs->pin_based_vm_exec_control; 1666 vmcs12->vm_exit_controls = evmcs->vm_exit_controls; 1667 vmcs12->secondary_vm_exec_control = 1668 evmcs->secondary_vm_exec_control; 1669 } 1670 1671 if (unlikely(!(hv_clean_fields & 1672 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) { 1673 vmcs12->io_bitmap_a = evmcs->io_bitmap_a; 1674 vmcs12->io_bitmap_b = evmcs->io_bitmap_b; 1675 } 1676 1677 if (unlikely(!(hv_clean_fields & 1678 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) { 1679 vmcs12->msr_bitmap = evmcs->msr_bitmap; 1680 } 1681 1682 if (unlikely(!(hv_clean_fields & 1683 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) { 1684 vmcs12->guest_es_base = evmcs->guest_es_base; 1685 vmcs12->guest_cs_base = evmcs->guest_cs_base; 1686 vmcs12->guest_ss_base = evmcs->guest_ss_base; 1687 vmcs12->guest_ds_base = evmcs->guest_ds_base; 1688 vmcs12->guest_fs_base = evmcs->guest_fs_base; 1689 vmcs12->guest_gs_base = evmcs->guest_gs_base; 1690 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base; 1691 vmcs12->guest_tr_base = evmcs->guest_tr_base; 1692 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base; 1693 vmcs12->guest_idtr_base = evmcs->guest_idtr_base; 1694 vmcs12->guest_es_limit = evmcs->guest_es_limit; 1695 vmcs12->guest_cs_limit = evmcs->guest_cs_limit; 1696 vmcs12->guest_ss_limit = evmcs->guest_ss_limit; 1697 vmcs12->guest_ds_limit = evmcs->guest_ds_limit; 1698 vmcs12->guest_fs_limit = evmcs->guest_fs_limit; 1699 vmcs12->guest_gs_limit = evmcs->guest_gs_limit; 1700 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit; 1701 vmcs12->guest_tr_limit = evmcs->guest_tr_limit; 1702 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit; 1703 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit; 1704 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes; 1705 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; 1706 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; 1707 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; 1708 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; 1709 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; 1710 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; 1711 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; 1712 vmcs12->guest_es_selector = evmcs->guest_es_selector; 1713 vmcs12->guest_cs_selector = evmcs->guest_cs_selector; 1714 vmcs12->guest_ss_selector = evmcs->guest_ss_selector; 1715 vmcs12->guest_ds_selector = evmcs->guest_ds_selector; 1716 vmcs12->guest_fs_selector = evmcs->guest_fs_selector; 1717 vmcs12->guest_gs_selector = evmcs->guest_gs_selector; 1718 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; 1719 vmcs12->guest_tr_selector = evmcs->guest_tr_selector; 1720 } 1721 1722 if (unlikely(!(hv_clean_fields & 1723 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) { 1724 vmcs12->tsc_offset = evmcs->tsc_offset; 1725 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr; 1726 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; 1727 vmcs12->encls_exiting_bitmap = evmcs->encls_exiting_bitmap; 1728 vmcs12->tsc_multiplier = evmcs->tsc_multiplier; 1729 } 1730 1731 if (unlikely(!(hv_clean_fields & 1732 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) { 1733 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; 1734 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; 1735 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; 1736 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; 1737 vmcs12->guest_cr0 = evmcs->guest_cr0; 1738 vmcs12->guest_cr3 = evmcs->guest_cr3; 1739 vmcs12->guest_cr4 = evmcs->guest_cr4; 1740 vmcs12->guest_dr7 = evmcs->guest_dr7; 1741 } 1742 1743 if (unlikely(!(hv_clean_fields & 1744 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) { 1745 vmcs12->host_fs_base = evmcs->host_fs_base; 1746 vmcs12->host_gs_base = evmcs->host_gs_base; 1747 vmcs12->host_tr_base = evmcs->host_tr_base; 1748 vmcs12->host_gdtr_base = evmcs->host_gdtr_base; 1749 vmcs12->host_idtr_base = evmcs->host_idtr_base; 1750 vmcs12->host_rsp = evmcs->host_rsp; 1751 } 1752 1753 if (unlikely(!(hv_clean_fields & 1754 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) { 1755 vmcs12->ept_pointer = evmcs->ept_pointer; 1756 vmcs12->virtual_processor_id = evmcs->virtual_processor_id; 1757 } 1758 1759 if (unlikely(!(hv_clean_fields & 1760 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) { 1761 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; 1762 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; 1763 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; 1764 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; 1765 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; 1766 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; 1767 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; 1768 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; 1769 vmcs12->guest_pending_dbg_exceptions = 1770 evmcs->guest_pending_dbg_exceptions; 1771 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; 1772 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; 1773 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; 1774 vmcs12->guest_activity_state = evmcs->guest_activity_state; 1775 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; 1776 vmcs12->guest_ia32_perf_global_ctrl = evmcs->guest_ia32_perf_global_ctrl; 1777 /* 1778 * Not present in struct vmcs12: 1779 * vmcs12->guest_ia32_s_cet = evmcs->guest_ia32_s_cet; 1780 * vmcs12->guest_ia32_lbr_ctl = evmcs->guest_ia32_lbr_ctl; 1781 * vmcs12->guest_ia32_int_ssp_table_addr = evmcs->guest_ia32_int_ssp_table_addr; 1782 */ 1783 } 1784 1785 /* 1786 * Not used? 1787 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; 1788 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; 1789 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; 1790 * vmcs12->page_fault_error_code_mask = 1791 * evmcs->page_fault_error_code_mask; 1792 * vmcs12->page_fault_error_code_match = 1793 * evmcs->page_fault_error_code_match; 1794 * vmcs12->cr3_target_count = evmcs->cr3_target_count; 1795 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; 1796 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; 1797 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; 1798 */ 1799 1800 /* 1801 * Read only fields: 1802 * vmcs12->guest_physical_address = evmcs->guest_physical_address; 1803 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; 1804 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; 1805 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; 1806 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; 1807 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; 1808 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; 1809 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; 1810 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; 1811 * vmcs12->exit_qualification = evmcs->exit_qualification; 1812 * vmcs12->guest_linear_address = evmcs->guest_linear_address; 1813 * 1814 * Not present in struct vmcs12: 1815 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; 1816 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; 1817 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; 1818 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; 1819 */ 1820 1821 return; 1822 } 1823 1824 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) 1825 { 1826 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1827 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 1828 1829 /* 1830 * Should not be changed by KVM: 1831 * 1832 * evmcs->host_es_selector = vmcs12->host_es_selector; 1833 * evmcs->host_cs_selector = vmcs12->host_cs_selector; 1834 * evmcs->host_ss_selector = vmcs12->host_ss_selector; 1835 * evmcs->host_ds_selector = vmcs12->host_ds_selector; 1836 * evmcs->host_fs_selector = vmcs12->host_fs_selector; 1837 * evmcs->host_gs_selector = vmcs12->host_gs_selector; 1838 * evmcs->host_tr_selector = vmcs12->host_tr_selector; 1839 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; 1840 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; 1841 * evmcs->host_cr0 = vmcs12->host_cr0; 1842 * evmcs->host_cr3 = vmcs12->host_cr3; 1843 * evmcs->host_cr4 = vmcs12->host_cr4; 1844 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; 1845 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; 1846 * evmcs->host_rip = vmcs12->host_rip; 1847 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; 1848 * evmcs->host_fs_base = vmcs12->host_fs_base; 1849 * evmcs->host_gs_base = vmcs12->host_gs_base; 1850 * evmcs->host_tr_base = vmcs12->host_tr_base; 1851 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; 1852 * evmcs->host_idtr_base = vmcs12->host_idtr_base; 1853 * evmcs->host_rsp = vmcs12->host_rsp; 1854 * sync_vmcs02_to_vmcs12() doesn't read these: 1855 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; 1856 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; 1857 * evmcs->msr_bitmap = vmcs12->msr_bitmap; 1858 * evmcs->ept_pointer = vmcs12->ept_pointer; 1859 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; 1860 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; 1861 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; 1862 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; 1863 * evmcs->tpr_threshold = vmcs12->tpr_threshold; 1864 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; 1865 * evmcs->exception_bitmap = vmcs12->exception_bitmap; 1866 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; 1867 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; 1868 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; 1869 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; 1870 * evmcs->page_fault_error_code_mask = 1871 * vmcs12->page_fault_error_code_mask; 1872 * evmcs->page_fault_error_code_match = 1873 * vmcs12->page_fault_error_code_match; 1874 * evmcs->cr3_target_count = vmcs12->cr3_target_count; 1875 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; 1876 * evmcs->tsc_offset = vmcs12->tsc_offset; 1877 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; 1878 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; 1879 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; 1880 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; 1881 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; 1882 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; 1883 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; 1884 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; 1885 * evmcs->guest_ia32_perf_global_ctrl = vmcs12->guest_ia32_perf_global_ctrl; 1886 * evmcs->host_ia32_perf_global_ctrl = vmcs12->host_ia32_perf_global_ctrl; 1887 * evmcs->encls_exiting_bitmap = vmcs12->encls_exiting_bitmap; 1888 * evmcs->tsc_multiplier = vmcs12->tsc_multiplier; 1889 * 1890 * Not present in struct vmcs12: 1891 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; 1892 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; 1893 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; 1894 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; 1895 * evmcs->host_ia32_s_cet = vmcs12->host_ia32_s_cet; 1896 * evmcs->host_ssp = vmcs12->host_ssp; 1897 * evmcs->host_ia32_int_ssp_table_addr = vmcs12->host_ia32_int_ssp_table_addr; 1898 * evmcs->guest_ia32_s_cet = vmcs12->guest_ia32_s_cet; 1899 * evmcs->guest_ia32_lbr_ctl = vmcs12->guest_ia32_lbr_ctl; 1900 * evmcs->guest_ia32_int_ssp_table_addr = vmcs12->guest_ia32_int_ssp_table_addr; 1901 * evmcs->guest_ssp = vmcs12->guest_ssp; 1902 */ 1903 1904 evmcs->guest_es_selector = vmcs12->guest_es_selector; 1905 evmcs->guest_cs_selector = vmcs12->guest_cs_selector; 1906 evmcs->guest_ss_selector = vmcs12->guest_ss_selector; 1907 evmcs->guest_ds_selector = vmcs12->guest_ds_selector; 1908 evmcs->guest_fs_selector = vmcs12->guest_fs_selector; 1909 evmcs->guest_gs_selector = vmcs12->guest_gs_selector; 1910 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; 1911 evmcs->guest_tr_selector = vmcs12->guest_tr_selector; 1912 1913 evmcs->guest_es_limit = vmcs12->guest_es_limit; 1914 evmcs->guest_cs_limit = vmcs12->guest_cs_limit; 1915 evmcs->guest_ss_limit = vmcs12->guest_ss_limit; 1916 evmcs->guest_ds_limit = vmcs12->guest_ds_limit; 1917 evmcs->guest_fs_limit = vmcs12->guest_fs_limit; 1918 evmcs->guest_gs_limit = vmcs12->guest_gs_limit; 1919 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; 1920 evmcs->guest_tr_limit = vmcs12->guest_tr_limit; 1921 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; 1922 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; 1923 1924 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; 1925 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; 1926 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; 1927 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; 1928 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; 1929 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; 1930 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; 1931 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; 1932 1933 evmcs->guest_es_base = vmcs12->guest_es_base; 1934 evmcs->guest_cs_base = vmcs12->guest_cs_base; 1935 evmcs->guest_ss_base = vmcs12->guest_ss_base; 1936 evmcs->guest_ds_base = vmcs12->guest_ds_base; 1937 evmcs->guest_fs_base = vmcs12->guest_fs_base; 1938 evmcs->guest_gs_base = vmcs12->guest_gs_base; 1939 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; 1940 evmcs->guest_tr_base = vmcs12->guest_tr_base; 1941 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; 1942 evmcs->guest_idtr_base = vmcs12->guest_idtr_base; 1943 1944 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; 1945 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; 1946 1947 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; 1948 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; 1949 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; 1950 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; 1951 1952 evmcs->guest_pending_dbg_exceptions = 1953 vmcs12->guest_pending_dbg_exceptions; 1954 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; 1955 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; 1956 1957 evmcs->guest_activity_state = vmcs12->guest_activity_state; 1958 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; 1959 1960 evmcs->guest_cr0 = vmcs12->guest_cr0; 1961 evmcs->guest_cr3 = vmcs12->guest_cr3; 1962 evmcs->guest_cr4 = vmcs12->guest_cr4; 1963 evmcs->guest_dr7 = vmcs12->guest_dr7; 1964 1965 evmcs->guest_physical_address = vmcs12->guest_physical_address; 1966 1967 evmcs->vm_instruction_error = vmcs12->vm_instruction_error; 1968 evmcs->vm_exit_reason = vmcs12->vm_exit_reason; 1969 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; 1970 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; 1971 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; 1972 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; 1973 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; 1974 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; 1975 1976 evmcs->exit_qualification = vmcs12->exit_qualification; 1977 1978 evmcs->guest_linear_address = vmcs12->guest_linear_address; 1979 evmcs->guest_rsp = vmcs12->guest_rsp; 1980 evmcs->guest_rflags = vmcs12->guest_rflags; 1981 1982 evmcs->guest_interruptibility_info = 1983 vmcs12->guest_interruptibility_info; 1984 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; 1985 evmcs->vm_entry_controls = vmcs12->vm_entry_controls; 1986 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; 1987 evmcs->vm_entry_exception_error_code = 1988 vmcs12->vm_entry_exception_error_code; 1989 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; 1990 1991 evmcs->guest_rip = vmcs12->guest_rip; 1992 1993 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; 1994 1995 return; 1996 } 1997 1998 /* 1999 * This is an equivalent of the nested hypervisor executing the vmptrld 2000 * instruction. 2001 */ 2002 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld( 2003 struct kvm_vcpu *vcpu, bool from_launch) 2004 { 2005 struct vcpu_vmx *vmx = to_vmx(vcpu); 2006 bool evmcs_gpa_changed = false; 2007 u64 evmcs_gpa; 2008 2009 if (likely(!guest_cpuid_has_evmcs(vcpu))) 2010 return EVMPTRLD_DISABLED; 2011 2012 evmcs_gpa = nested_get_evmptr(vcpu); 2013 if (!evmptr_is_valid(evmcs_gpa)) { 2014 nested_release_evmcs(vcpu); 2015 return EVMPTRLD_DISABLED; 2016 } 2017 2018 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { 2019 vmx->nested.current_vmptr = INVALID_GPA; 2020 2021 nested_release_evmcs(vcpu); 2022 2023 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa), 2024 &vmx->nested.hv_evmcs_map)) 2025 return EVMPTRLD_ERROR; 2026 2027 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva; 2028 2029 /* 2030 * Currently, KVM only supports eVMCS version 1 2031 * (== KVM_EVMCS_VERSION) and thus we expect guest to set this 2032 * value to first u32 field of eVMCS which should specify eVMCS 2033 * VersionNumber. 2034 * 2035 * Guest should be aware of supported eVMCS versions by host by 2036 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is 2037 * expected to set this CPUID leaf according to the value 2038 * returned in vmcs_version from nested_enable_evmcs(). 2039 * 2040 * However, it turns out that Microsoft Hyper-V fails to comply 2041 * to their own invented interface: When Hyper-V use eVMCS, it 2042 * just sets first u32 field of eVMCS to revision_id specified 2043 * in MSR_IA32_VMX_BASIC. Instead of used eVMCS version number 2044 * which is one of the supported versions specified in 2045 * CPUID.0x4000000A.EAX[0:15]. 2046 * 2047 * To overcome Hyper-V bug, we accept here either a supported 2048 * eVMCS version or VMCS12 revision_id as valid values for first 2049 * u32 field of eVMCS. 2050 */ 2051 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && 2052 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { 2053 nested_release_evmcs(vcpu); 2054 return EVMPTRLD_VMFAIL; 2055 } 2056 2057 vmx->nested.hv_evmcs_vmptr = evmcs_gpa; 2058 2059 evmcs_gpa_changed = true; 2060 /* 2061 * Unlike normal vmcs12, enlightened vmcs12 is not fully 2062 * reloaded from guest's memory (read only fields, fields not 2063 * present in struct hv_enlightened_vmcs, ...). Make sure there 2064 * are no leftovers. 2065 */ 2066 if (from_launch) { 2067 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2068 memset(vmcs12, 0, sizeof(*vmcs12)); 2069 vmcs12->hdr.revision_id = VMCS12_REVISION; 2070 } 2071 2072 } 2073 2074 /* 2075 * Clean fields data can't be used on VMLAUNCH and when we switch 2076 * between different L2 guests as KVM keeps a single VMCS12 per L1. 2077 */ 2078 if (from_launch || evmcs_gpa_changed) { 2079 vmx->nested.hv_evmcs->hv_clean_fields &= 2080 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2081 2082 vmx->nested.force_msr_bitmap_recalc = true; 2083 } 2084 2085 return EVMPTRLD_SUCCEEDED; 2086 } 2087 2088 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu) 2089 { 2090 struct vcpu_vmx *vmx = to_vmx(vcpu); 2091 2092 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 2093 copy_vmcs12_to_enlightened(vmx); 2094 else 2095 copy_vmcs12_to_shadow(vmx); 2096 2097 vmx->nested.need_vmcs12_to_shadow_sync = false; 2098 } 2099 2100 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) 2101 { 2102 struct vcpu_vmx *vmx = 2103 container_of(timer, struct vcpu_vmx, nested.preemption_timer); 2104 2105 vmx->nested.preemption_timer_expired = true; 2106 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); 2107 kvm_vcpu_kick(&vmx->vcpu); 2108 2109 return HRTIMER_NORESTART; 2110 } 2111 2112 static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu) 2113 { 2114 struct vcpu_vmx *vmx = to_vmx(vcpu); 2115 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2116 2117 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >> 2118 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2119 2120 if (!vmx->nested.has_preemption_timer_deadline) { 2121 vmx->nested.preemption_timer_deadline = 2122 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc; 2123 vmx->nested.has_preemption_timer_deadline = true; 2124 } 2125 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; 2126 } 2127 2128 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu, 2129 u64 preemption_timeout) 2130 { 2131 struct vcpu_vmx *vmx = to_vmx(vcpu); 2132 2133 /* 2134 * A timer value of zero is architecturally guaranteed to cause 2135 * a VMExit prior to executing any instructions in the guest. 2136 */ 2137 if (preemption_timeout == 0) { 2138 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); 2139 return; 2140 } 2141 2142 if (vcpu->arch.virtual_tsc_khz == 0) 2143 return; 2144 2145 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2146 preemption_timeout *= 1000000; 2147 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); 2148 hrtimer_start(&vmx->nested.preemption_timer, 2149 ktime_add_ns(ktime_get(), preemption_timeout), 2150 HRTIMER_MODE_ABS_PINNED); 2151 } 2152 2153 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2154 { 2155 if (vmx->nested.nested_run_pending && 2156 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) 2157 return vmcs12->guest_ia32_efer; 2158 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) 2159 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); 2160 else 2161 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); 2162 } 2163 2164 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) 2165 { 2166 struct kvm *kvm = vmx->vcpu.kvm; 2167 2168 /* 2169 * If vmcs02 hasn't been initialized, set the constant vmcs02 state 2170 * according to L0's settings (vmcs12 is irrelevant here). Host 2171 * fields that come from L0 and are not constant, e.g. HOST_CR3, 2172 * will be set as needed prior to VMLAUNCH/VMRESUME. 2173 */ 2174 if (vmx->nested.vmcs02_initialized) 2175 return; 2176 vmx->nested.vmcs02_initialized = true; 2177 2178 /* 2179 * We don't care what the EPTP value is we just need to guarantee 2180 * it's valid so we don't get a false positive when doing early 2181 * consistency checks. 2182 */ 2183 if (enable_ept && nested_early_check) 2184 vmcs_write64(EPT_POINTER, 2185 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); 2186 2187 /* All VMFUNCs are currently emulated through L0 vmexits. */ 2188 if (cpu_has_vmx_vmfunc()) 2189 vmcs_write64(VM_FUNCTION_CONTROL, 0); 2190 2191 if (cpu_has_vmx_posted_intr()) 2192 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); 2193 2194 if (cpu_has_vmx_msr_bitmap()) 2195 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); 2196 2197 /* 2198 * PML is emulated for L2, but never enabled in hardware as the MMU 2199 * handles A/D emulation. Disabling PML for L2 also avoids having to 2200 * deal with filtering out L2 GPAs from the buffer. 2201 */ 2202 if (enable_pml) { 2203 vmcs_write64(PML_ADDRESS, 0); 2204 vmcs_write16(GUEST_PML_INDEX, -1); 2205 } 2206 2207 if (cpu_has_vmx_encls_vmexit()) 2208 vmcs_write64(ENCLS_EXITING_BITMAP, INVALID_GPA); 2209 2210 if (kvm_notify_vmexit_enabled(kvm)) 2211 vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window); 2212 2213 /* 2214 * Set the MSR load/store lists to match L0's settings. Only the 2215 * addresses are constant (for vmcs02), the counts can change based 2216 * on L2's behavior, e.g. switching to/from long mode. 2217 */ 2218 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); 2219 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 2220 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 2221 2222 vmx_set_constant_host_state(vmx); 2223 } 2224 2225 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, 2226 struct vmcs12 *vmcs12) 2227 { 2228 prepare_vmcs02_constant_state(vmx); 2229 2230 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); 2231 2232 if (enable_vpid) { 2233 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) 2234 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); 2235 else 2236 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 2237 } 2238 } 2239 2240 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01, 2241 struct vmcs12 *vmcs12) 2242 { 2243 u32 exec_control; 2244 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); 2245 2246 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 2247 prepare_vmcs02_early_rare(vmx, vmcs12); 2248 2249 /* 2250 * PIN CONTROLS 2251 */ 2252 exec_control = __pin_controls_get(vmcs01); 2253 exec_control |= (vmcs12->pin_based_vm_exec_control & 2254 ~PIN_BASED_VMX_PREEMPTION_TIMER); 2255 2256 /* Posted interrupts setting is only taken from vmcs12. */ 2257 vmx->nested.pi_pending = false; 2258 if (nested_cpu_has_posted_intr(vmcs12)) 2259 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; 2260 else 2261 exec_control &= ~PIN_BASED_POSTED_INTR; 2262 pin_controls_set(vmx, exec_control); 2263 2264 /* 2265 * EXEC CONTROLS 2266 */ 2267 exec_control = __exec_controls_get(vmcs01); /* L0's desires */ 2268 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING; 2269 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING; 2270 exec_control &= ~CPU_BASED_TPR_SHADOW; 2271 exec_control |= vmcs12->cpu_based_vm_exec_control; 2272 2273 vmx->nested.l1_tpr_threshold = -1; 2274 if (exec_control & CPU_BASED_TPR_SHADOW) 2275 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 2276 #ifdef CONFIG_X86_64 2277 else 2278 exec_control |= CPU_BASED_CR8_LOAD_EXITING | 2279 CPU_BASED_CR8_STORE_EXITING; 2280 #endif 2281 2282 /* 2283 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed 2284 * for I/O port accesses. 2285 */ 2286 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 2287 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 2288 2289 /* 2290 * This bit will be computed in nested_get_vmcs12_pages, because 2291 * we do not have access to L1's MSR bitmap yet. For now, keep 2292 * the same bit as before, hoping to avoid multiple VMWRITEs that 2293 * only set/clear this bit. 2294 */ 2295 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; 2296 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; 2297 2298 exec_controls_set(vmx, exec_control); 2299 2300 /* 2301 * SECONDARY EXEC CONTROLS 2302 */ 2303 if (cpu_has_secondary_exec_ctrls()) { 2304 exec_control = __secondary_exec_controls_get(vmcs01); 2305 2306 /* Take the following fields only from vmcs12 */ 2307 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2308 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2309 SECONDARY_EXEC_ENABLE_INVPCID | 2310 SECONDARY_EXEC_ENABLE_RDTSCP | 2311 SECONDARY_EXEC_ENABLE_XSAVES | 2312 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2313 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2314 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2315 SECONDARY_EXEC_ENABLE_VMFUNC | 2316 SECONDARY_EXEC_DESC); 2317 2318 if (nested_cpu_has(vmcs12, 2319 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) 2320 exec_control |= vmcs12->secondary_vm_exec_control; 2321 2322 /* PML is emulated and never enabled in hardware for L2. */ 2323 exec_control &= ~SECONDARY_EXEC_ENABLE_PML; 2324 2325 /* VMCS shadowing for L2 is emulated for now */ 2326 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 2327 2328 /* 2329 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4() 2330 * will not have to rewrite the controls just for this bit. 2331 */ 2332 if (vmx_umip_emulated() && (vmcs12->guest_cr4 & X86_CR4_UMIP)) 2333 exec_control |= SECONDARY_EXEC_DESC; 2334 2335 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 2336 vmcs_write16(GUEST_INTR_STATUS, 2337 vmcs12->guest_intr_status); 2338 2339 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) 2340 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 2341 2342 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING) 2343 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12); 2344 2345 secondary_exec_controls_set(vmx, exec_control); 2346 } 2347 2348 /* 2349 * ENTRY CONTROLS 2350 * 2351 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE 2352 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate 2353 * on the related bits (if supported by the CPU) in the hope that 2354 * we can avoid VMWrites during vmx_set_efer(). 2355 * 2356 * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is 2357 * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to 2358 * do the same for L2. 2359 */ 2360 exec_control = __vm_entry_controls_get(vmcs01); 2361 exec_control |= (vmcs12->vm_entry_controls & 2362 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL); 2363 exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER); 2364 if (cpu_has_load_ia32_efer()) { 2365 if (guest_efer & EFER_LMA) 2366 exec_control |= VM_ENTRY_IA32E_MODE; 2367 if (guest_efer != host_efer) 2368 exec_control |= VM_ENTRY_LOAD_IA32_EFER; 2369 } 2370 vm_entry_controls_set(vmx, exec_control); 2371 2372 /* 2373 * EXIT CONTROLS 2374 * 2375 * L2->L1 exit controls are emulated - the hardware exit is to L0 so 2376 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER 2377 * bits may be modified by vmx_set_efer() in prepare_vmcs02(). 2378 */ 2379 exec_control = __vm_exit_controls_get(vmcs01); 2380 if (cpu_has_load_ia32_efer() && guest_efer != host_efer) 2381 exec_control |= VM_EXIT_LOAD_IA32_EFER; 2382 else 2383 exec_control &= ~VM_EXIT_LOAD_IA32_EFER; 2384 vm_exit_controls_set(vmx, exec_control); 2385 2386 /* 2387 * Interrupt/Exception Fields 2388 */ 2389 if (vmx->nested.nested_run_pending) { 2390 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2391 vmcs12->vm_entry_intr_info_field); 2392 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 2393 vmcs12->vm_entry_exception_error_code); 2394 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2395 vmcs12->vm_entry_instruction_len); 2396 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 2397 vmcs12->guest_interruptibility_info); 2398 vmx->loaded_vmcs->nmi_known_unmasked = 2399 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); 2400 } else { 2401 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 2402 } 2403 } 2404 2405 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2406 { 2407 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2408 2409 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2410 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2411 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 2412 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 2413 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); 2414 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); 2415 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); 2416 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); 2417 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); 2418 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); 2419 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); 2420 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); 2421 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); 2422 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); 2423 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); 2424 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); 2425 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); 2426 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); 2427 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); 2428 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); 2429 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); 2430 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); 2431 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); 2432 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); 2433 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); 2434 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); 2435 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); 2436 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); 2437 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); 2438 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); 2439 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); 2440 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); 2441 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); 2442 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); 2443 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); 2444 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); 2445 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); 2446 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); 2447 2448 vmx->segment_cache.bitmask = 0; 2449 } 2450 2451 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2452 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { 2453 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 2454 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 2455 vmcs12->guest_pending_dbg_exceptions); 2456 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); 2457 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); 2458 2459 /* 2460 * L1 may access the L2's PDPTR, so save them to construct 2461 * vmcs12 2462 */ 2463 if (enable_ept) { 2464 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2465 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2466 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2467 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2468 } 2469 2470 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && 2471 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 2472 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); 2473 } 2474 2475 if (nested_cpu_has_xsaves(vmcs12)) 2476 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); 2477 2478 /* 2479 * Whether page-faults are trapped is determined by a combination of 2480 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0 2481 * doesn't care about page faults then we should set all of these to 2482 * L1's desires. However, if L0 does care about (some) page faults, it 2483 * is not easy (if at all possible?) to merge L0 and L1's desires, we 2484 * simply ask to exit on each and every L2 page fault. This is done by 2485 * setting MASK=MATCH=0 and (see below) EB.PF=1. 2486 * Note that below we don't need special code to set EB.PF beyond the 2487 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, 2488 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when 2489 * !enable_ept, EB.PF is 1, so the "or" will always be 1. 2490 */ 2491 if (vmx_need_pf_intercept(&vmx->vcpu)) { 2492 /* 2493 * TODO: if both L0 and L1 need the same MASK and MATCH, 2494 * go ahead and use it? 2495 */ 2496 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 2497 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 2498 } else { 2499 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask); 2500 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match); 2501 } 2502 2503 if (cpu_has_vmx_apicv()) { 2504 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); 2505 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); 2506 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); 2507 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); 2508 } 2509 2510 /* 2511 * Make sure the msr_autostore list is up to date before we set the 2512 * count in the vmcs02. 2513 */ 2514 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); 2515 2516 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); 2517 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 2518 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 2519 2520 set_cr4_guest_host_mask(vmx); 2521 } 2522 2523 /* 2524 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested 2525 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it 2526 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 2527 * guest in a way that will both be appropriate to L1's requests, and our 2528 * needs. In addition to modifying the active vmcs (which is vmcs02), this 2529 * function also has additional necessary side-effects, like setting various 2530 * vcpu->arch fields. 2531 * Returns 0 on success, 1 on failure. Invalid state exit qualification code 2532 * is assigned to entry_failure_code on failure. 2533 */ 2534 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 2535 bool from_vmentry, 2536 enum vm_entry_failure_code *entry_failure_code) 2537 { 2538 struct vcpu_vmx *vmx = to_vmx(vcpu); 2539 bool load_guest_pdptrs_vmcs12 = false; 2540 2541 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { 2542 prepare_vmcs02_rare(vmx, vmcs12); 2543 vmx->nested.dirty_vmcs12 = false; 2544 2545 load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) || 2546 !(vmx->nested.hv_evmcs->hv_clean_fields & 2547 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1); 2548 } 2549 2550 if (vmx->nested.nested_run_pending && 2551 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { 2552 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); 2553 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); 2554 } else { 2555 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); 2556 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl); 2557 } 2558 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || 2559 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) 2560 vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs); 2561 vmx_set_rflags(vcpu, vmcs12->guest_rflags); 2562 2563 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the 2564 * bitwise-or of what L1 wants to trap for L2, and what we want to 2565 * trap. Note that CR0.TS also needs updating - we do this later. 2566 */ 2567 vmx_update_exception_bitmap(vcpu); 2568 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; 2569 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); 2570 2571 if (vmx->nested.nested_run_pending && 2572 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { 2573 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); 2574 vcpu->arch.pat = vmcs12->guest_ia32_pat; 2575 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 2576 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 2577 } 2578 2579 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( 2580 vcpu->arch.l1_tsc_offset, 2581 vmx_get_l2_tsc_offset(vcpu), 2582 vmx_get_l2_tsc_multiplier(vcpu)); 2583 2584 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( 2585 vcpu->arch.l1_tsc_scaling_ratio, 2586 vmx_get_l2_tsc_multiplier(vcpu)); 2587 2588 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 2589 if (kvm_caps.has_tsc_control) 2590 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); 2591 2592 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true); 2593 2594 if (nested_cpu_has_ept(vmcs12)) 2595 nested_ept_init_mmu_context(vcpu); 2596 2597 /* 2598 * Override the CR0/CR4 read shadows after setting the effective guest 2599 * CR0/CR4. The common helpers also set the shadows, but they don't 2600 * account for vmcs12's cr0/4_guest_host_mask. 2601 */ 2602 vmx_set_cr0(vcpu, vmcs12->guest_cr0); 2603 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); 2604 2605 vmx_set_cr4(vcpu, vmcs12->guest_cr4); 2606 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); 2607 2608 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); 2609 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ 2610 vmx_set_efer(vcpu, vcpu->arch.efer); 2611 2612 /* 2613 * Guest state is invalid and unrestricted guest is disabled, 2614 * which means L1 attempted VMEntry to L2 with invalid state. 2615 * Fail the VMEntry. 2616 * 2617 * However when force loading the guest state (SMM exit or 2618 * loading nested state after migration, it is possible to 2619 * have invalid guest state now, which will be later fixed by 2620 * restoring L2 register state 2621 */ 2622 if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) { 2623 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2624 return -EINVAL; 2625 } 2626 2627 /* Shadow page tables on either EPT or shadow page tables. */ 2628 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), 2629 from_vmentry, entry_failure_code)) 2630 return -EINVAL; 2631 2632 /* 2633 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12 2634 * on nested VM-Exit, which can occur without actually running L2 and 2635 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with 2636 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the 2637 * transition to HLT instead of running L2. 2638 */ 2639 if (enable_ept) 2640 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3); 2641 2642 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */ 2643 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) && 2644 is_pae_paging(vcpu)) { 2645 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2646 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2647 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2648 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2649 } 2650 2651 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 2652 kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) && 2653 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 2654 vmcs12->guest_ia32_perf_global_ctrl))) { 2655 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2656 return -EINVAL; 2657 } 2658 2659 kvm_rsp_write(vcpu, vmcs12->guest_rsp); 2660 kvm_rip_write(vcpu, vmcs12->guest_rip); 2661 2662 /* 2663 * It was observed that genuine Hyper-V running in L1 doesn't reset 2664 * 'hv_clean_fields' by itself, it only sets the corresponding dirty 2665 * bits when it changes a field in eVMCS. Mark all fields as clean 2666 * here. 2667 */ 2668 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 2669 vmx->nested.hv_evmcs->hv_clean_fields |= 2670 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2671 2672 return 0; 2673 } 2674 2675 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) 2676 { 2677 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) && 2678 nested_cpu_has_virtual_nmis(vmcs12))) 2679 return -EINVAL; 2680 2681 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) && 2682 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING))) 2683 return -EINVAL; 2684 2685 return 0; 2686 } 2687 2688 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) 2689 { 2690 struct vcpu_vmx *vmx = to_vmx(vcpu); 2691 2692 /* Check for memory type validity */ 2693 switch (new_eptp & VMX_EPTP_MT_MASK) { 2694 case VMX_EPTP_MT_UC: 2695 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) 2696 return false; 2697 break; 2698 case VMX_EPTP_MT_WB: 2699 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) 2700 return false; 2701 break; 2702 default: 2703 return false; 2704 } 2705 2706 /* Page-walk levels validity. */ 2707 switch (new_eptp & VMX_EPTP_PWL_MASK) { 2708 case VMX_EPTP_PWL_5: 2709 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) 2710 return false; 2711 break; 2712 case VMX_EPTP_PWL_4: 2713 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) 2714 return false; 2715 break; 2716 default: 2717 return false; 2718 } 2719 2720 /* Reserved bits should not be set */ 2721 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) 2722 return false; 2723 2724 /* AD, if set, should be supported */ 2725 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) { 2726 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) 2727 return false; 2728 } 2729 2730 return true; 2731 } 2732 2733 /* 2734 * Checks related to VM-Execution Control Fields 2735 */ 2736 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, 2737 struct vmcs12 *vmcs12) 2738 { 2739 struct vcpu_vmx *vmx = to_vmx(vcpu); 2740 2741 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control, 2742 vmx->nested.msrs.pinbased_ctls_low, 2743 vmx->nested.msrs.pinbased_ctls_high)) || 2744 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, 2745 vmx->nested.msrs.procbased_ctls_low, 2746 vmx->nested.msrs.procbased_ctls_high))) 2747 return -EINVAL; 2748 2749 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && 2750 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control, 2751 vmx->nested.msrs.secondary_ctls_low, 2752 vmx->nested.msrs.secondary_ctls_high))) 2753 return -EINVAL; 2754 2755 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) || 2756 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || 2757 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || 2758 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || 2759 nested_vmx_check_apic_access_controls(vcpu, vmcs12) || 2760 nested_vmx_check_apicv_controls(vcpu, vmcs12) || 2761 nested_vmx_check_nmi_controls(vmcs12) || 2762 nested_vmx_check_pml_controls(vcpu, vmcs12) || 2763 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || 2764 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || 2765 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || 2766 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2767 return -EINVAL; 2768 2769 if (!nested_cpu_has_preemption_timer(vmcs12) && 2770 nested_cpu_has_save_preemption_timer(vmcs12)) 2771 return -EINVAL; 2772 2773 if (nested_cpu_has_ept(vmcs12) && 2774 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer))) 2775 return -EINVAL; 2776 2777 if (nested_cpu_has_vmfunc(vmcs12)) { 2778 if (CC(vmcs12->vm_function_control & 2779 ~vmx->nested.msrs.vmfunc_controls)) 2780 return -EINVAL; 2781 2782 if (nested_cpu_has_eptp_switching(vmcs12)) { 2783 if (CC(!nested_cpu_has_ept(vmcs12)) || 2784 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address))) 2785 return -EINVAL; 2786 } 2787 } 2788 2789 return 0; 2790 } 2791 2792 /* 2793 * Checks related to VM-Exit Control Fields 2794 */ 2795 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, 2796 struct vmcs12 *vmcs12) 2797 { 2798 struct vcpu_vmx *vmx = to_vmx(vcpu); 2799 2800 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls, 2801 vmx->nested.msrs.exit_ctls_low, 2802 vmx->nested.msrs.exit_ctls_high)) || 2803 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))) 2804 return -EINVAL; 2805 2806 return 0; 2807 } 2808 2809 /* 2810 * Checks related to VM-Entry Control Fields 2811 */ 2812 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, 2813 struct vmcs12 *vmcs12) 2814 { 2815 struct vcpu_vmx *vmx = to_vmx(vcpu); 2816 2817 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls, 2818 vmx->nested.msrs.entry_ctls_low, 2819 vmx->nested.msrs.entry_ctls_high))) 2820 return -EINVAL; 2821 2822 /* 2823 * From the Intel SDM, volume 3: 2824 * Fields relevant to VM-entry event injection must be set properly. 2825 * These fields are the VM-entry interruption-information field, the 2826 * VM-entry exception error code, and the VM-entry instruction length. 2827 */ 2828 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { 2829 u32 intr_info = vmcs12->vm_entry_intr_info_field; 2830 u8 vector = intr_info & INTR_INFO_VECTOR_MASK; 2831 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; 2832 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; 2833 bool should_have_error_code; 2834 bool urg = nested_cpu_has2(vmcs12, 2835 SECONDARY_EXEC_UNRESTRICTED_GUEST); 2836 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; 2837 2838 /* VM-entry interruption-info field: interruption type */ 2839 if (CC(intr_type == INTR_TYPE_RESERVED) || 2840 CC(intr_type == INTR_TYPE_OTHER_EVENT && 2841 !nested_cpu_supports_monitor_trap_flag(vcpu))) 2842 return -EINVAL; 2843 2844 /* VM-entry interruption-info field: vector */ 2845 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || 2846 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || 2847 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) 2848 return -EINVAL; 2849 2850 /* VM-entry interruption-info field: deliver error code */ 2851 should_have_error_code = 2852 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && 2853 x86_exception_has_error_code(vector); 2854 if (CC(has_error_code != should_have_error_code)) 2855 return -EINVAL; 2856 2857 /* VM-entry exception error code */ 2858 if (CC(has_error_code && 2859 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))) 2860 return -EINVAL; 2861 2862 /* VM-entry interruption-info field: reserved bits */ 2863 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK)) 2864 return -EINVAL; 2865 2866 /* VM-entry instruction length */ 2867 switch (intr_type) { 2868 case INTR_TYPE_SOFT_EXCEPTION: 2869 case INTR_TYPE_SOFT_INTR: 2870 case INTR_TYPE_PRIV_SW_EXCEPTION: 2871 if (CC(vmcs12->vm_entry_instruction_len > 15) || 2872 CC(vmcs12->vm_entry_instruction_len == 0 && 2873 CC(!nested_cpu_has_zero_length_injection(vcpu)))) 2874 return -EINVAL; 2875 } 2876 } 2877 2878 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) 2879 return -EINVAL; 2880 2881 return 0; 2882 } 2883 2884 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, 2885 struct vmcs12 *vmcs12) 2886 { 2887 if (nested_check_vm_execution_controls(vcpu, vmcs12) || 2888 nested_check_vm_exit_controls(vcpu, vmcs12) || 2889 nested_check_vm_entry_controls(vcpu, vmcs12)) 2890 return -EINVAL; 2891 2892 if (guest_cpuid_has_evmcs(vcpu)) 2893 return nested_evmcs_check_controls(vmcs12); 2894 2895 return 0; 2896 } 2897 2898 static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu, 2899 struct vmcs12 *vmcs12) 2900 { 2901 #ifdef CONFIG_X86_64 2902 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != 2903 !!(vcpu->arch.efer & EFER_LMA))) 2904 return -EINVAL; 2905 #endif 2906 return 0; 2907 } 2908 2909 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, 2910 struct vmcs12 *vmcs12) 2911 { 2912 bool ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); 2913 2914 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || 2915 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || 2916 CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3))) 2917 return -EINVAL; 2918 2919 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || 2920 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))) 2921 return -EINVAL; 2922 2923 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && 2924 CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) 2925 return -EINVAL; 2926 2927 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && 2928 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 2929 vmcs12->host_ia32_perf_global_ctrl))) 2930 return -EINVAL; 2931 2932 if (ia32e) { 2933 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) 2934 return -EINVAL; 2935 } else { 2936 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || 2937 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || 2938 CC((vmcs12->host_rip) >> 32)) 2939 return -EINVAL; 2940 } 2941 2942 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2943 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2944 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2945 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2946 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2947 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2948 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2949 CC(vmcs12->host_cs_selector == 0) || 2950 CC(vmcs12->host_tr_selector == 0) || 2951 CC(vmcs12->host_ss_selector == 0 && !ia32e)) 2952 return -EINVAL; 2953 2954 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) || 2955 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) || 2956 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) || 2957 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) || 2958 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) || 2959 CC(is_noncanonical_address(vmcs12->host_rip, vcpu))) 2960 return -EINVAL; 2961 2962 /* 2963 * If the load IA32_EFER VM-exit control is 1, bits reserved in the 2964 * IA32_EFER MSR must be 0 in the field for that register. In addition, 2965 * the values of the LMA and LME bits in the field must each be that of 2966 * the host address-space size VM-exit control. 2967 */ 2968 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { 2969 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) || 2970 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) || 2971 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))) 2972 return -EINVAL; 2973 } 2974 2975 return 0; 2976 } 2977 2978 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, 2979 struct vmcs12 *vmcs12) 2980 { 2981 struct vcpu_vmx *vmx = to_vmx(vcpu); 2982 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; 2983 struct vmcs_hdr hdr; 2984 2985 if (vmcs12->vmcs_link_pointer == INVALID_GPA) 2986 return 0; 2987 2988 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) 2989 return -EINVAL; 2990 2991 if (ghc->gpa != vmcs12->vmcs_link_pointer && 2992 CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, 2993 vmcs12->vmcs_link_pointer, VMCS12_SIZE))) 2994 return -EINVAL; 2995 2996 if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, 2997 offsetof(struct vmcs12, hdr), 2998 sizeof(hdr)))) 2999 return -EINVAL; 3000 3001 if (CC(hdr.revision_id != VMCS12_REVISION) || 3002 CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))) 3003 return -EINVAL; 3004 3005 return 0; 3006 } 3007 3008 /* 3009 * Checks related to Guest Non-register State 3010 */ 3011 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) 3012 { 3013 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && 3014 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT && 3015 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI)) 3016 return -EINVAL; 3017 3018 return 0; 3019 } 3020 3021 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, 3022 struct vmcs12 *vmcs12, 3023 enum vm_entry_failure_code *entry_failure_code) 3024 { 3025 bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE); 3026 3027 *entry_failure_code = ENTRY_FAIL_DEFAULT; 3028 3029 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) || 3030 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))) 3031 return -EINVAL; 3032 3033 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && 3034 CC(!kvm_dr7_valid(vmcs12->guest_dr7))) 3035 return -EINVAL; 3036 3037 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && 3038 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) 3039 return -EINVAL; 3040 3041 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { 3042 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR; 3043 return -EINVAL; 3044 } 3045 3046 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 3047 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 3048 vmcs12->guest_ia32_perf_global_ctrl))) 3049 return -EINVAL; 3050 3051 if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)) 3052 return -EINVAL; 3053 3054 if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) || 3055 CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG))) 3056 return -EINVAL; 3057 3058 /* 3059 * If the load IA32_EFER VM-entry control is 1, the following checks 3060 * are performed on the field for the IA32_EFER MSR: 3061 * - Bits reserved in the IA32_EFER MSR must be 0. 3062 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of 3063 * the IA-32e mode guest VM-exit control. It must also be identical 3064 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to 3065 * CR0.PG) is 1. 3066 */ 3067 if (to_vmx(vcpu)->nested.nested_run_pending && 3068 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { 3069 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || 3070 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || 3071 CC(((vmcs12->guest_cr0 & X86_CR0_PG) && 3072 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))) 3073 return -EINVAL; 3074 } 3075 3076 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && 3077 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) || 3078 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))) 3079 return -EINVAL; 3080 3081 if (nested_check_guest_non_reg_state(vmcs12)) 3082 return -EINVAL; 3083 3084 return 0; 3085 } 3086 3087 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) 3088 { 3089 struct vcpu_vmx *vmx = to_vmx(vcpu); 3090 unsigned long cr3, cr4; 3091 bool vm_fail; 3092 3093 if (!nested_early_check) 3094 return 0; 3095 3096 if (vmx->msr_autoload.host.nr) 3097 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 3098 if (vmx->msr_autoload.guest.nr) 3099 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 3100 3101 preempt_disable(); 3102 3103 vmx_prepare_switch_to_guest(vcpu); 3104 3105 /* 3106 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, 3107 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to 3108 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. 3109 * there is no need to preserve other bits or save/restore the field. 3110 */ 3111 vmcs_writel(GUEST_RFLAGS, 0); 3112 3113 cr3 = __get_current_cr3_fast(); 3114 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 3115 vmcs_writel(HOST_CR3, cr3); 3116 vmx->loaded_vmcs->host_state.cr3 = cr3; 3117 } 3118 3119 cr4 = cr4_read_shadow(); 3120 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 3121 vmcs_writel(HOST_CR4, cr4); 3122 vmx->loaded_vmcs->host_state.cr4 = cr4; 3123 } 3124 3125 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 3126 __vmx_vcpu_run_flags(vmx)); 3127 3128 if (vmx->msr_autoload.host.nr) 3129 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 3130 if (vmx->msr_autoload.guest.nr) 3131 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 3132 3133 if (vm_fail) { 3134 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR); 3135 3136 preempt_enable(); 3137 3138 trace_kvm_nested_vmenter_failed( 3139 "early hardware check VM-instruction error: ", error); 3140 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3141 return 1; 3142 } 3143 3144 /* 3145 * VMExit clears RFLAGS.IF and DR7, even on a consistency check. 3146 */ 3147 if (hw_breakpoint_active()) 3148 set_debugreg(__this_cpu_read(cpu_dr7), 7); 3149 local_irq_enable(); 3150 preempt_enable(); 3151 3152 /* 3153 * A non-failing VMEntry means we somehow entered guest mode with 3154 * an illegal RIP, and that's just the tip of the iceberg. There 3155 * is no telling what memory has been modified or what state has 3156 * been exposed to unknown code. Hitting this all but guarantees 3157 * a (very critical) hardware issue. 3158 */ 3159 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & 3160 VMX_EXIT_REASONS_FAILED_VMENTRY)); 3161 3162 return 0; 3163 } 3164 3165 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu) 3166 { 3167 struct vcpu_vmx *vmx = to_vmx(vcpu); 3168 3169 /* 3170 * hv_evmcs may end up being not mapped after migration (when 3171 * L2 was running), map it here to make sure vmcs12 changes are 3172 * properly reflected. 3173 */ 3174 if (guest_cpuid_has_evmcs(vcpu) && 3175 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) { 3176 enum nested_evmptrld_status evmptrld_status = 3177 nested_vmx_handle_enlightened_vmptrld(vcpu, false); 3178 3179 if (evmptrld_status == EVMPTRLD_VMFAIL || 3180 evmptrld_status == EVMPTRLD_ERROR) 3181 return false; 3182 3183 /* 3184 * Post migration VMCS12 always provides the most actual 3185 * information, copy it to eVMCS upon entry. 3186 */ 3187 vmx->nested.need_vmcs12_to_shadow_sync = true; 3188 } 3189 3190 return true; 3191 } 3192 3193 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) 3194 { 3195 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3196 struct vcpu_vmx *vmx = to_vmx(vcpu); 3197 struct kvm_host_map *map; 3198 3199 if (!vcpu->arch.pdptrs_from_userspace && 3200 !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 3201 /* 3202 * Reload the guest's PDPTRs since after a migration 3203 * the guest CR3 might be restored prior to setting the nested 3204 * state which can lead to a load of wrong PDPTRs. 3205 */ 3206 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3))) 3207 return false; 3208 } 3209 3210 3211 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3212 map = &vmx->nested.apic_access_page_map; 3213 3214 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->apic_access_addr), map)) { 3215 vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(map->pfn)); 3216 } else { 3217 pr_debug_ratelimited("%s: no backing for APIC-access address in vmcs12\n", 3218 __func__); 3219 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3220 vcpu->run->internal.suberror = 3221 KVM_INTERNAL_ERROR_EMULATION; 3222 vcpu->run->internal.ndata = 0; 3223 return false; 3224 } 3225 } 3226 3227 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3228 map = &vmx->nested.virtual_apic_map; 3229 3230 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { 3231 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); 3232 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) && 3233 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) && 3234 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3235 /* 3236 * The processor will never use the TPR shadow, simply 3237 * clear the bit from the execution control. Such a 3238 * configuration is useless, but it happens in tests. 3239 * For any other configuration, failing the vm entry is 3240 * _not_ what the processor does but it's basically the 3241 * only possibility we have. 3242 */ 3243 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); 3244 } else { 3245 /* 3246 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to 3247 * force VM-Entry to fail. 3248 */ 3249 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, INVALID_GPA); 3250 } 3251 } 3252 3253 if (nested_cpu_has_posted_intr(vmcs12)) { 3254 map = &vmx->nested.pi_desc_map; 3255 3256 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { 3257 vmx->nested.pi_desc = 3258 (struct pi_desc *)(((void *)map->hva) + 3259 offset_in_page(vmcs12->posted_intr_desc_addr)); 3260 vmcs_write64(POSTED_INTR_DESC_ADDR, 3261 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); 3262 } else { 3263 /* 3264 * Defer the KVM_INTERNAL_EXIT until KVM tries to 3265 * access the contents of the VMCS12 posted interrupt 3266 * descriptor. (Note that KVM may do this when it 3267 * should not, per the architectural specification.) 3268 */ 3269 vmx->nested.pi_desc = NULL; 3270 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR); 3271 } 3272 } 3273 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) 3274 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3275 else 3276 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3277 3278 return true; 3279 } 3280 3281 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) 3282 { 3283 /* 3284 * Note: nested_get_evmcs_page() also updates 'vp_assist_page' copy 3285 * in 'struct kvm_vcpu_hv' in case eVMCS is in use, this is mandatory 3286 * to make nested_evmcs_l2_tlb_flush_enabled() work correctly post 3287 * migration. 3288 */ 3289 if (!nested_get_evmcs_page(vcpu)) { 3290 pr_debug_ratelimited("%s: enlightened vmptrld failed\n", 3291 __func__); 3292 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3293 vcpu->run->internal.suberror = 3294 KVM_INTERNAL_ERROR_EMULATION; 3295 vcpu->run->internal.ndata = 0; 3296 3297 return false; 3298 } 3299 3300 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu)) 3301 return false; 3302 3303 return true; 3304 } 3305 3306 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) 3307 { 3308 struct vmcs12 *vmcs12; 3309 struct vcpu_vmx *vmx = to_vmx(vcpu); 3310 gpa_t dst; 3311 3312 if (WARN_ON_ONCE(!is_guest_mode(vcpu))) 3313 return 0; 3314 3315 if (WARN_ON_ONCE(vmx->nested.pml_full)) 3316 return 1; 3317 3318 /* 3319 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is 3320 * set is already checked as part of A/D emulation. 3321 */ 3322 vmcs12 = get_vmcs12(vcpu); 3323 if (!nested_cpu_has_pml(vmcs12)) 3324 return 0; 3325 3326 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { 3327 vmx->nested.pml_full = true; 3328 return 1; 3329 } 3330 3331 gpa &= ~0xFFFull; 3332 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; 3333 3334 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, 3335 offset_in_page(dst), sizeof(gpa))) 3336 return 0; 3337 3338 vmcs12->guest_pml_index--; 3339 3340 return 0; 3341 } 3342 3343 /* 3344 * Intel's VMX Instruction Reference specifies a common set of prerequisites 3345 * for running VMX instructions (except VMXON, whose prerequisites are 3346 * slightly different). It also specifies what exception to inject otherwise. 3347 * Note that many of these exceptions have priority over VM exits, so they 3348 * don't have to be checked again here. 3349 */ 3350 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) 3351 { 3352 if (!to_vmx(vcpu)->nested.vmxon) { 3353 kvm_queue_exception(vcpu, UD_VECTOR); 3354 return 0; 3355 } 3356 3357 if (vmx_get_cpl(vcpu)) { 3358 kvm_inject_gp(vcpu, 0); 3359 return 0; 3360 } 3361 3362 return 1; 3363 } 3364 3365 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) 3366 { 3367 u8 rvi = vmx_get_rvi(); 3368 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI); 3369 3370 return ((rvi & 0xf0) > (vppr & 0xf0)); 3371 } 3372 3373 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 3374 struct vmcs12 *vmcs12); 3375 3376 /* 3377 * If from_vmentry is false, this is being called from state restore (either RSM 3378 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. 3379 * 3380 * Returns: 3381 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode 3382 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail 3383 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit 3384 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error 3385 */ 3386 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, 3387 bool from_vmentry) 3388 { 3389 struct vcpu_vmx *vmx = to_vmx(vcpu); 3390 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3391 enum vm_entry_failure_code entry_failure_code; 3392 bool evaluate_pending_interrupts; 3393 union vmx_exit_reason exit_reason = { 3394 .basic = EXIT_REASON_INVALID_STATE, 3395 .failed_vmentry = 1, 3396 }; 3397 u32 failed_index; 3398 3399 trace_kvm_nested_vmenter(kvm_rip_read(vcpu), 3400 vmx->nested.current_vmptr, 3401 vmcs12->guest_rip, 3402 vmcs12->guest_intr_status, 3403 vmcs12->vm_entry_intr_info_field, 3404 vmcs12->secondary_vm_exec_control & SECONDARY_EXEC_ENABLE_EPT, 3405 vmcs12->ept_pointer, 3406 vmcs12->guest_cr3, 3407 KVM_ISA_VMX); 3408 3409 kvm_service_local_tlb_flush_requests(vcpu); 3410 3411 evaluate_pending_interrupts = exec_controls_get(vmx) & 3412 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING); 3413 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) 3414 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); 3415 if (!evaluate_pending_interrupts) 3416 evaluate_pending_interrupts |= kvm_apic_has_pending_init_or_sipi(vcpu); 3417 3418 if (!vmx->nested.nested_run_pending || 3419 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 3420 vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 3421 if (kvm_mpx_supported() && 3422 (!vmx->nested.nested_run_pending || 3423 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) 3424 vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3425 3426 /* 3427 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and* 3428 * nested early checks are disabled. In the event of a "late" VM-Fail, 3429 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its 3430 * software model to the pre-VMEntry host state. When EPT is disabled, 3431 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes 3432 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing 3433 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to 3434 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested 3435 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is 3436 * guaranteed to be overwritten with a shadow CR3 prior to re-entering 3437 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as 3438 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks 3439 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail 3440 * path would need to manually save/restore vmcs01.GUEST_CR3. 3441 */ 3442 if (!enable_ept && !nested_early_check) 3443 vmcs_writel(GUEST_CR3, vcpu->arch.cr3); 3444 3445 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); 3446 3447 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12); 3448 3449 if (from_vmentry) { 3450 if (unlikely(!nested_get_vmcs12_pages(vcpu))) { 3451 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3452 return NVMX_VMENTRY_KVM_INTERNAL_ERROR; 3453 } 3454 3455 if (nested_vmx_check_vmentry_hw(vcpu)) { 3456 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3457 return NVMX_VMENTRY_VMFAIL; 3458 } 3459 3460 if (nested_vmx_check_guest_state(vcpu, vmcs12, 3461 &entry_failure_code)) { 3462 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3463 vmcs12->exit_qualification = entry_failure_code; 3464 goto vmentry_fail_vmexit; 3465 } 3466 } 3467 3468 enter_guest_mode(vcpu); 3469 3470 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) { 3471 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3472 vmcs12->exit_qualification = entry_failure_code; 3473 goto vmentry_fail_vmexit_guest_mode; 3474 } 3475 3476 if (from_vmentry) { 3477 failed_index = nested_vmx_load_msr(vcpu, 3478 vmcs12->vm_entry_msr_load_addr, 3479 vmcs12->vm_entry_msr_load_count); 3480 if (failed_index) { 3481 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL; 3482 vmcs12->exit_qualification = failed_index; 3483 goto vmentry_fail_vmexit_guest_mode; 3484 } 3485 } else { 3486 /* 3487 * The MMU is not initialized to point at the right entities yet and 3488 * "get pages" would need to read data from the guest (i.e. we will 3489 * need to perform gpa to hpa translation). Request a call 3490 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs 3491 * have already been set at vmentry time and should not be reset. 3492 */ 3493 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 3494 } 3495 3496 /* 3497 * Re-evaluate pending events if L1 had a pending IRQ/NMI/INIT/SIPI 3498 * when it executed VMLAUNCH/VMRESUME, as entering non-root mode can 3499 * effectively unblock various events, e.g. INIT/SIPI cause VM-Exit 3500 * unconditionally. 3501 */ 3502 if (unlikely(evaluate_pending_interrupts)) 3503 kvm_make_request(KVM_REQ_EVENT, vcpu); 3504 3505 /* 3506 * Do not start the preemption timer hrtimer until after we know 3507 * we are successful, so that only nested_vmx_vmexit needs to cancel 3508 * the timer. 3509 */ 3510 vmx->nested.preemption_timer_expired = false; 3511 if (nested_cpu_has_preemption_timer(vmcs12)) { 3512 u64 timer_value = vmx_calc_preemption_timer_value(vcpu); 3513 vmx_start_preemption_timer(vcpu, timer_value); 3514 } 3515 3516 /* 3517 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point 3518 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet 3519 * returned as far as L1 is concerned. It will only return (and set 3520 * the success flag) when L2 exits (see nested_vmx_vmexit()). 3521 */ 3522 return NVMX_VMENTRY_SUCCESS; 3523 3524 /* 3525 * A failed consistency check that leads to a VMExit during L1's 3526 * VMEnter to L2 is a variation of a normal VMexit, as explained in 3527 * 26.7 "VM-entry failures during or after loading guest state". 3528 */ 3529 vmentry_fail_vmexit_guest_mode: 3530 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 3531 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3532 leave_guest_mode(vcpu); 3533 3534 vmentry_fail_vmexit: 3535 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3536 3537 if (!from_vmentry) 3538 return NVMX_VMENTRY_VMEXIT; 3539 3540 load_vmcs12_host_state(vcpu, vmcs12); 3541 vmcs12->vm_exit_reason = exit_reason.full; 3542 if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 3543 vmx->nested.need_vmcs12_to_shadow_sync = true; 3544 return NVMX_VMENTRY_VMEXIT; 3545 } 3546 3547 /* 3548 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 3549 * for running an L2 nested guest. 3550 */ 3551 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) 3552 { 3553 struct vmcs12 *vmcs12; 3554 enum nvmx_vmentry_status status; 3555 struct vcpu_vmx *vmx = to_vmx(vcpu); 3556 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); 3557 enum nested_evmptrld_status evmptrld_status; 3558 3559 if (!nested_vmx_check_permission(vcpu)) 3560 return 1; 3561 3562 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch); 3563 if (evmptrld_status == EVMPTRLD_ERROR) { 3564 kvm_queue_exception(vcpu, UD_VECTOR); 3565 return 1; 3566 } 3567 3568 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 3569 3570 if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) 3571 return nested_vmx_failInvalid(vcpu); 3572 3573 if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) && 3574 vmx->nested.current_vmptr == INVALID_GPA)) 3575 return nested_vmx_failInvalid(vcpu); 3576 3577 vmcs12 = get_vmcs12(vcpu); 3578 3579 /* 3580 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact 3581 * that there *is* a valid VMCS pointer, RFLAGS.CF is set 3582 * rather than RFLAGS.ZF, and no error number is stored to the 3583 * VM-instruction error field. 3584 */ 3585 if (CC(vmcs12->hdr.shadow_vmcs)) 3586 return nested_vmx_failInvalid(vcpu); 3587 3588 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { 3589 copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields); 3590 /* Enlightened VMCS doesn't have launch state */ 3591 vmcs12->launch_state = !launch; 3592 } else if (enable_shadow_vmcs) { 3593 copy_shadow_to_vmcs12(vmx); 3594 } 3595 3596 /* 3597 * The nested entry process starts with enforcing various prerequisites 3598 * on vmcs12 as required by the Intel SDM, and act appropriately when 3599 * they fail: As the SDM explains, some conditions should cause the 3600 * instruction to fail, while others will cause the instruction to seem 3601 * to succeed, but return an EXIT_REASON_INVALID_STATE. 3602 * To speed up the normal (success) code path, we should avoid checking 3603 * for misconfigurations which will anyway be caught by the processor 3604 * when using the merged vmcs02. 3605 */ 3606 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)) 3607 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); 3608 3609 if (CC(vmcs12->launch_state == launch)) 3610 return nested_vmx_fail(vcpu, 3611 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS 3612 : VMXERR_VMRESUME_NONLAUNCHED_VMCS); 3613 3614 if (nested_vmx_check_controls(vcpu, vmcs12)) 3615 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3616 3617 if (nested_vmx_check_address_space_size(vcpu, vmcs12)) 3618 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3619 3620 if (nested_vmx_check_host_state(vcpu, vmcs12)) 3621 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3622 3623 /* 3624 * We're finally done with prerequisite checking, and can start with 3625 * the nested entry. 3626 */ 3627 vmx->nested.nested_run_pending = 1; 3628 vmx->nested.has_preemption_timer_deadline = false; 3629 status = nested_vmx_enter_non_root_mode(vcpu, true); 3630 if (unlikely(status != NVMX_VMENTRY_SUCCESS)) 3631 goto vmentry_failed; 3632 3633 /* Emulate processing of posted interrupts on VM-Enter. */ 3634 if (nested_cpu_has_posted_intr(vmcs12) && 3635 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { 3636 vmx->nested.pi_pending = true; 3637 kvm_make_request(KVM_REQ_EVENT, vcpu); 3638 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); 3639 } 3640 3641 /* Hide L1D cache contents from the nested guest. */ 3642 vmx->vcpu.arch.l1tf_flush_l1d = true; 3643 3644 /* 3645 * Must happen outside of nested_vmx_enter_non_root_mode() as it will 3646 * also be used as part of restoring nVMX state for 3647 * snapshot restore (migration). 3648 * 3649 * In this flow, it is assumed that vmcs12 cache was 3650 * transferred as part of captured nVMX state and should 3651 * therefore not be read from guest memory (which may not 3652 * exist on destination host yet). 3653 */ 3654 nested_cache_shadow_vmcs12(vcpu, vmcs12); 3655 3656 switch (vmcs12->guest_activity_state) { 3657 case GUEST_ACTIVITY_HLT: 3658 /* 3659 * If we're entering a halted L2 vcpu and the L2 vcpu won't be 3660 * awakened by event injection or by an NMI-window VM-exit or 3661 * by an interrupt-window VM-exit, halt the vcpu. 3662 */ 3663 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && 3664 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) && 3665 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) && 3666 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { 3667 vmx->nested.nested_run_pending = 0; 3668 return kvm_emulate_halt_noskip(vcpu); 3669 } 3670 break; 3671 case GUEST_ACTIVITY_WAIT_SIPI: 3672 vmx->nested.nested_run_pending = 0; 3673 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 3674 break; 3675 default: 3676 break; 3677 } 3678 3679 return 1; 3680 3681 vmentry_failed: 3682 vmx->nested.nested_run_pending = 0; 3683 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR) 3684 return 0; 3685 if (status == NVMX_VMENTRY_VMEXIT) 3686 return 1; 3687 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL); 3688 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3689 } 3690 3691 /* 3692 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date 3693 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). 3694 * This function returns the new value we should put in vmcs12.guest_cr0. 3695 * It's not enough to just return the vmcs02 GUEST_CR0. Rather, 3696 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now 3697 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 3698 * didn't trap the bit, because if L1 did, so would L0). 3699 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have 3700 * been modified by L2, and L1 knows it. So just leave the old value of 3701 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 3702 * isn't relevant, because if L0 traps this bit it can set it to anything. 3703 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have 3704 * changed these bits, and therefore they need to be updated, but L0 3705 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather 3706 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. 3707 */ 3708 static inline unsigned long 3709 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3710 { 3711 return 3712 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | 3713 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | 3714 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | 3715 vcpu->arch.cr0_guest_owned_bits)); 3716 } 3717 3718 static inline unsigned long 3719 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3720 { 3721 return 3722 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | 3723 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | 3724 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | 3725 vcpu->arch.cr4_guest_owned_bits)); 3726 } 3727 3728 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, 3729 struct vmcs12 *vmcs12, 3730 u32 vm_exit_reason, u32 exit_intr_info) 3731 { 3732 u32 idt_vectoring; 3733 unsigned int nr; 3734 3735 /* 3736 * Per the SDM, VM-Exits due to double and triple faults are never 3737 * considered to occur during event delivery, even if the double/triple 3738 * fault is the result of an escalating vectoring issue. 3739 * 3740 * Note, the SDM qualifies the double fault behavior with "The original 3741 * event results in a double-fault exception". It's unclear why the 3742 * qualification exists since exits due to double fault can occur only 3743 * while vectoring a different exception (injected events are never 3744 * subject to interception), i.e. there's _always_ an original event. 3745 * 3746 * The SDM also uses NMI as a confusing example for the "original event 3747 * causes the VM exit directly" clause. NMI isn't special in any way, 3748 * the same rule applies to all events that cause an exit directly. 3749 * NMI is an odd choice for the example because NMIs can only occur on 3750 * instruction boundaries, i.e. they _can't_ occur during vectoring. 3751 */ 3752 if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT || 3753 ((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI && 3754 is_double_fault(exit_intr_info))) { 3755 vmcs12->idt_vectoring_info_field = 0; 3756 } else if (vcpu->arch.exception.injected) { 3757 nr = vcpu->arch.exception.vector; 3758 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3759 3760 if (kvm_exception_is_soft(nr)) { 3761 vmcs12->vm_exit_instruction_len = 3762 vcpu->arch.event_exit_inst_len; 3763 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; 3764 } else 3765 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; 3766 3767 if (vcpu->arch.exception.has_error_code) { 3768 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; 3769 vmcs12->idt_vectoring_error_code = 3770 vcpu->arch.exception.error_code; 3771 } 3772 3773 vmcs12->idt_vectoring_info_field = idt_vectoring; 3774 } else if (vcpu->arch.nmi_injected) { 3775 vmcs12->idt_vectoring_info_field = 3776 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; 3777 } else if (vcpu->arch.interrupt.injected) { 3778 nr = vcpu->arch.interrupt.nr; 3779 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3780 3781 if (vcpu->arch.interrupt.soft) { 3782 idt_vectoring |= INTR_TYPE_SOFT_INTR; 3783 vmcs12->vm_entry_instruction_len = 3784 vcpu->arch.event_exit_inst_len; 3785 } else 3786 idt_vectoring |= INTR_TYPE_EXT_INTR; 3787 3788 vmcs12->idt_vectoring_info_field = idt_vectoring; 3789 } else { 3790 vmcs12->idt_vectoring_info_field = 0; 3791 } 3792 } 3793 3794 3795 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) 3796 { 3797 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3798 gfn_t gfn; 3799 3800 /* 3801 * Don't need to mark the APIC access page dirty; it is never 3802 * written to by the CPU during APIC virtualization. 3803 */ 3804 3805 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3806 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; 3807 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3808 } 3809 3810 if (nested_cpu_has_posted_intr(vmcs12)) { 3811 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; 3812 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3813 } 3814 } 3815 3816 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 3817 { 3818 struct vcpu_vmx *vmx = to_vmx(vcpu); 3819 int max_irr; 3820 void *vapic_page; 3821 u16 status; 3822 3823 if (!vmx->nested.pi_pending) 3824 return 0; 3825 3826 if (!vmx->nested.pi_desc) 3827 goto mmio_needed; 3828 3829 vmx->nested.pi_pending = false; 3830 3831 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 3832 return 0; 3833 3834 max_irr = pi_find_highest_vector(vmx->nested.pi_desc); 3835 if (max_irr > 0) { 3836 vapic_page = vmx->nested.virtual_apic_map.hva; 3837 if (!vapic_page) 3838 goto mmio_needed; 3839 3840 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, 3841 vapic_page, &max_irr); 3842 status = vmcs_read16(GUEST_INTR_STATUS); 3843 if ((u8)max_irr > ((u8)status & 0xff)) { 3844 status &= ~0xff; 3845 status |= (u8)max_irr; 3846 vmcs_write16(GUEST_INTR_STATUS, status); 3847 } 3848 } 3849 3850 nested_mark_vmcs12_pages_dirty(vcpu); 3851 return 0; 3852 3853 mmio_needed: 3854 kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL); 3855 return -ENXIO; 3856 } 3857 3858 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu) 3859 { 3860 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit; 3861 u32 intr_info = ex->vector | INTR_INFO_VALID_MASK; 3862 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3863 unsigned long exit_qual; 3864 3865 if (ex->has_payload) { 3866 exit_qual = ex->payload; 3867 } else if (ex->vector == PF_VECTOR) { 3868 exit_qual = vcpu->arch.cr2; 3869 } else if (ex->vector == DB_VECTOR) { 3870 exit_qual = vcpu->arch.dr6; 3871 exit_qual &= ~DR6_BT; 3872 exit_qual ^= DR6_ACTIVE_LOW; 3873 } else { 3874 exit_qual = 0; 3875 } 3876 3877 /* 3878 * Unlike AMD's Paged Real Mode, which reports an error code on #PF 3879 * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the 3880 * "has error code" flags on VM-Exit if the CPU is in Real Mode. 3881 */ 3882 if (ex->has_error_code && is_protmode(vcpu)) { 3883 /* 3884 * Intel CPUs do not generate error codes with bits 31:16 set, 3885 * and more importantly VMX disallows setting bits 31:16 in the 3886 * injected error code for VM-Entry. Drop the bits to mimic 3887 * hardware and avoid inducing failure on nested VM-Entry if L1 3888 * chooses to inject the exception back to L2. AMD CPUs _do_ 3889 * generate "full" 32-bit error codes, so KVM allows userspace 3890 * to inject exception error codes with bits 31:16 set. 3891 */ 3892 vmcs12->vm_exit_intr_error_code = (u16)ex->error_code; 3893 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 3894 } 3895 3896 if (kvm_exception_is_soft(ex->vector)) 3897 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 3898 else 3899 intr_info |= INTR_TYPE_HARD_EXCEPTION; 3900 3901 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && 3902 vmx_get_nmi_mask(vcpu)) 3903 intr_info |= INTR_INFO_UNBLOCK_NMI; 3904 3905 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); 3906 } 3907 3908 /* 3909 * Returns true if a debug trap is (likely) pending delivery. Infer the class 3910 * of a #DB (trap-like vs. fault-like) from the exception payload (to-be-DR6). 3911 * Using the payload is flawed because code breakpoints (fault-like) and data 3912 * breakpoints (trap-like) set the same bits in DR6 (breakpoint detected), i.e. 3913 * this will return false positives if a to-be-injected code breakpoint #DB is 3914 * pending (from KVM's perspective, but not "pending" across an instruction 3915 * boundary). ICEBP, a.k.a. INT1, is also not reflected here even though it 3916 * too is trap-like. 3917 * 3918 * KVM "works" despite these flaws as ICEBP isn't currently supported by the 3919 * emulator, Monitor Trap Flag is not marked pending on intercepted #DBs (the 3920 * #DB has already happened), and MTF isn't marked pending on code breakpoints 3921 * from the emulator (because such #DBs are fault-like and thus don't trigger 3922 * actions that fire on instruction retire). 3923 */ 3924 static unsigned long vmx_get_pending_dbg_trap(struct kvm_queued_exception *ex) 3925 { 3926 if (!ex->pending || ex->vector != DB_VECTOR) 3927 return 0; 3928 3929 /* General Detect #DBs are always fault-like. */ 3930 return ex->payload & ~DR6_BD; 3931 } 3932 3933 /* 3934 * Returns true if there's a pending #DB exception that is lower priority than 3935 * a pending Monitor Trap Flag VM-Exit. TSS T-flag #DBs are not emulated by 3936 * KVM, but could theoretically be injected by userspace. Note, this code is 3937 * imperfect, see above. 3938 */ 3939 static bool vmx_is_low_priority_db_trap(struct kvm_queued_exception *ex) 3940 { 3941 return vmx_get_pending_dbg_trap(ex) & ~DR6_BT; 3942 } 3943 3944 /* 3945 * Certain VM-exits set the 'pending debug exceptions' field to indicate a 3946 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM 3947 * represents these debug traps with a payload that is said to be compatible 3948 * with the 'pending debug exceptions' field, write the payload to the VMCS 3949 * field if a VM-exit is delivered before the debug trap. 3950 */ 3951 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu) 3952 { 3953 unsigned long pending_dbg; 3954 3955 pending_dbg = vmx_get_pending_dbg_trap(&vcpu->arch.exception); 3956 if (pending_dbg) 3957 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, pending_dbg); 3958 } 3959 3960 static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu) 3961 { 3962 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && 3963 to_vmx(vcpu)->nested.preemption_timer_expired; 3964 } 3965 3966 static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection) 3967 { 3968 return nested_vmx_preemption_timer_pending(vcpu) || 3969 to_vmx(vcpu)->nested.mtf_pending; 3970 } 3971 3972 /* 3973 * Per the Intel SDM's table "Priority Among Concurrent Events", with minor 3974 * edits to fill in missing examples, e.g. #DB due to split-lock accesses, 3975 * and less minor edits to splice in the priority of VMX Non-Root specific 3976 * events, e.g. MTF and NMI/INTR-window exiting. 3977 * 3978 * 1 Hardware Reset and Machine Checks 3979 * - RESET 3980 * - Machine Check 3981 * 3982 * 2 Trap on Task Switch 3983 * - T flag in TSS is set (on task switch) 3984 * 3985 * 3 External Hardware Interventions 3986 * - FLUSH 3987 * - STOPCLK 3988 * - SMI 3989 * - INIT 3990 * 3991 * 3.5 Monitor Trap Flag (MTF) VM-exit[1] 3992 * 3993 * 4 Traps on Previous Instruction 3994 * - Breakpoints 3995 * - Trap-class Debug Exceptions (#DB due to TF flag set, data/I-O 3996 * breakpoint, or #DB due to a split-lock access) 3997 * 3998 * 4.3 VMX-preemption timer expired VM-exit 3999 * 4000 * 4.6 NMI-window exiting VM-exit[2] 4001 * 4002 * 5 Nonmaskable Interrupts (NMI) 4003 * 4004 * 5.5 Interrupt-window exiting VM-exit and Virtual-interrupt delivery 4005 * 4006 * 6 Maskable Hardware Interrupts 4007 * 4008 * 7 Code Breakpoint Fault 4009 * 4010 * 8 Faults from Fetching Next Instruction 4011 * - Code-Segment Limit Violation 4012 * - Code Page Fault 4013 * - Control protection exception (missing ENDBRANCH at target of indirect 4014 * call or jump) 4015 * 4016 * 9 Faults from Decoding Next Instruction 4017 * - Instruction length > 15 bytes 4018 * - Invalid Opcode 4019 * - Coprocessor Not Available 4020 * 4021 *10 Faults on Executing Instruction 4022 * - Overflow 4023 * - Bound error 4024 * - Invalid TSS 4025 * - Segment Not Present 4026 * - Stack fault 4027 * - General Protection 4028 * - Data Page Fault 4029 * - Alignment Check 4030 * - x86 FPU Floating-point exception 4031 * - SIMD floating-point exception 4032 * - Virtualization exception 4033 * - Control protection exception 4034 * 4035 * [1] Per the "Monitor Trap Flag" section: System-management interrupts (SMIs), 4036 * INIT signals, and higher priority events take priority over MTF VM exits. 4037 * MTF VM exits take priority over debug-trap exceptions and lower priority 4038 * events. 4039 * 4040 * [2] Debug-trap exceptions and higher priority events take priority over VM exits 4041 * caused by the VMX-preemption timer. VM exits caused by the VMX-preemption 4042 * timer take priority over VM exits caused by the "NMI-window exiting" 4043 * VM-execution control and lower priority events. 4044 * 4045 * [3] Debug-trap exceptions and higher priority events take priority over VM exits 4046 * caused by "NMI-window exiting". VM exits caused by this control take 4047 * priority over non-maskable interrupts (NMIs) and lower priority events. 4048 * 4049 * [4] Virtual-interrupt delivery has the same priority as that of VM exits due to 4050 * the 1-setting of the "interrupt-window exiting" VM-execution control. Thus, 4051 * non-maskable interrupts (NMIs) and higher priority events take priority over 4052 * delivery of a virtual interrupt; delivery of a virtual interrupt takes 4053 * priority over external interrupts and lower priority events. 4054 */ 4055 static int vmx_check_nested_events(struct kvm_vcpu *vcpu) 4056 { 4057 struct kvm_lapic *apic = vcpu->arch.apic; 4058 struct vcpu_vmx *vmx = to_vmx(vcpu); 4059 /* 4060 * Only a pending nested run blocks a pending exception. If there is a 4061 * previously injected event, the pending exception occurred while said 4062 * event was being delivered and thus needs to be handled. 4063 */ 4064 bool block_nested_exceptions = vmx->nested.nested_run_pending; 4065 /* 4066 * New events (not exceptions) are only recognized at instruction 4067 * boundaries. If an event needs reinjection, then KVM is handling a 4068 * VM-Exit that occurred _during_ instruction execution; new events are 4069 * blocked until the instruction completes. 4070 */ 4071 bool block_nested_events = block_nested_exceptions || 4072 kvm_event_needs_reinjection(vcpu); 4073 4074 if (lapic_in_kernel(vcpu) && 4075 test_bit(KVM_APIC_INIT, &apic->pending_events)) { 4076 if (block_nested_events) 4077 return -EBUSY; 4078 nested_vmx_update_pending_dbg(vcpu); 4079 clear_bit(KVM_APIC_INIT, &apic->pending_events); 4080 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED) 4081 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); 4082 4083 /* MTF is discarded if the vCPU is in WFS. */ 4084 vmx->nested.mtf_pending = false; 4085 return 0; 4086 } 4087 4088 if (lapic_in_kernel(vcpu) && 4089 test_bit(KVM_APIC_SIPI, &apic->pending_events)) { 4090 if (block_nested_events) 4091 return -EBUSY; 4092 4093 clear_bit(KVM_APIC_SIPI, &apic->pending_events); 4094 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 4095 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0, 4096 apic->sipi_vector & 0xFFUL); 4097 return 0; 4098 } 4099 /* Fallthrough, the SIPI is completely ignored. */ 4100 } 4101 4102 /* 4103 * Process exceptions that are higher priority than Monitor Trap Flag: 4104 * fault-like exceptions, TSS T flag #DB (not emulated by KVM, but 4105 * could theoretically come in from userspace), and ICEBP (INT1). 4106 * 4107 * TODO: SMIs have higher priority than MTF and trap-like #DBs (except 4108 * for TSS T flag #DBs). KVM also doesn't save/restore pending MTF 4109 * across SMI/RSM as it should; that needs to be addressed in order to 4110 * prioritize SMI over MTF and trap-like #DBs. 4111 */ 4112 if (vcpu->arch.exception_vmexit.pending && 4113 !vmx_is_low_priority_db_trap(&vcpu->arch.exception_vmexit)) { 4114 if (block_nested_exceptions) 4115 return -EBUSY; 4116 4117 nested_vmx_inject_exception_vmexit(vcpu); 4118 return 0; 4119 } 4120 4121 if (vcpu->arch.exception.pending && 4122 !vmx_is_low_priority_db_trap(&vcpu->arch.exception)) { 4123 if (block_nested_exceptions) 4124 return -EBUSY; 4125 goto no_vmexit; 4126 } 4127 4128 if (vmx->nested.mtf_pending) { 4129 if (block_nested_events) 4130 return -EBUSY; 4131 nested_vmx_update_pending_dbg(vcpu); 4132 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); 4133 return 0; 4134 } 4135 4136 if (vcpu->arch.exception_vmexit.pending) { 4137 if (block_nested_exceptions) 4138 return -EBUSY; 4139 4140 nested_vmx_inject_exception_vmexit(vcpu); 4141 return 0; 4142 } 4143 4144 if (vcpu->arch.exception.pending) { 4145 if (block_nested_exceptions) 4146 return -EBUSY; 4147 goto no_vmexit; 4148 } 4149 4150 if (nested_vmx_preemption_timer_pending(vcpu)) { 4151 if (block_nested_events) 4152 return -EBUSY; 4153 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); 4154 return 0; 4155 } 4156 4157 if (vcpu->arch.smi_pending && !is_smm(vcpu)) { 4158 if (block_nested_events) 4159 return -EBUSY; 4160 goto no_vmexit; 4161 } 4162 4163 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) { 4164 if (block_nested_events) 4165 return -EBUSY; 4166 if (!nested_exit_on_nmi(vcpu)) 4167 goto no_vmexit; 4168 4169 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 4170 NMI_VECTOR | INTR_TYPE_NMI_INTR | 4171 INTR_INFO_VALID_MASK, 0); 4172 /* 4173 * The NMI-triggered VM exit counts as injection: 4174 * clear this one and block further NMIs. 4175 */ 4176 vcpu->arch.nmi_pending = 0; 4177 vmx_set_nmi_mask(vcpu, true); 4178 return 0; 4179 } 4180 4181 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) { 4182 if (block_nested_events) 4183 return -EBUSY; 4184 if (!nested_exit_on_intr(vcpu)) 4185 goto no_vmexit; 4186 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); 4187 return 0; 4188 } 4189 4190 no_vmexit: 4191 return vmx_complete_nested_posted_interrupt(vcpu); 4192 } 4193 4194 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 4195 { 4196 ktime_t remaining = 4197 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); 4198 u64 value; 4199 4200 if (ktime_to_ns(remaining) <= 0) 4201 return 0; 4202 4203 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; 4204 do_div(value, 1000000); 4205 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 4206 } 4207 4208 static bool is_vmcs12_ext_field(unsigned long field) 4209 { 4210 switch (field) { 4211 case GUEST_ES_SELECTOR: 4212 case GUEST_CS_SELECTOR: 4213 case GUEST_SS_SELECTOR: 4214 case GUEST_DS_SELECTOR: 4215 case GUEST_FS_SELECTOR: 4216 case GUEST_GS_SELECTOR: 4217 case GUEST_LDTR_SELECTOR: 4218 case GUEST_TR_SELECTOR: 4219 case GUEST_ES_LIMIT: 4220 case GUEST_CS_LIMIT: 4221 case GUEST_SS_LIMIT: 4222 case GUEST_DS_LIMIT: 4223 case GUEST_FS_LIMIT: 4224 case GUEST_GS_LIMIT: 4225 case GUEST_LDTR_LIMIT: 4226 case GUEST_TR_LIMIT: 4227 case GUEST_GDTR_LIMIT: 4228 case GUEST_IDTR_LIMIT: 4229 case GUEST_ES_AR_BYTES: 4230 case GUEST_DS_AR_BYTES: 4231 case GUEST_FS_AR_BYTES: 4232 case GUEST_GS_AR_BYTES: 4233 case GUEST_LDTR_AR_BYTES: 4234 case GUEST_TR_AR_BYTES: 4235 case GUEST_ES_BASE: 4236 case GUEST_CS_BASE: 4237 case GUEST_SS_BASE: 4238 case GUEST_DS_BASE: 4239 case GUEST_FS_BASE: 4240 case GUEST_GS_BASE: 4241 case GUEST_LDTR_BASE: 4242 case GUEST_TR_BASE: 4243 case GUEST_GDTR_BASE: 4244 case GUEST_IDTR_BASE: 4245 case GUEST_PENDING_DBG_EXCEPTIONS: 4246 case GUEST_BNDCFGS: 4247 return true; 4248 default: 4249 break; 4250 } 4251 4252 return false; 4253 } 4254 4255 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 4256 struct vmcs12 *vmcs12) 4257 { 4258 struct vcpu_vmx *vmx = to_vmx(vcpu); 4259 4260 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); 4261 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); 4262 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); 4263 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); 4264 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); 4265 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); 4266 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); 4267 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); 4268 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); 4269 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); 4270 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); 4271 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); 4272 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); 4273 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); 4274 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); 4275 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); 4276 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); 4277 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); 4278 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); 4279 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); 4280 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); 4281 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); 4282 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); 4283 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); 4284 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); 4285 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); 4286 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); 4287 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); 4288 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); 4289 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); 4290 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); 4291 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); 4292 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); 4293 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); 4294 vmcs12->guest_pending_dbg_exceptions = 4295 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); 4296 4297 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; 4298 } 4299 4300 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 4301 struct vmcs12 *vmcs12) 4302 { 4303 struct vcpu_vmx *vmx = to_vmx(vcpu); 4304 int cpu; 4305 4306 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) 4307 return; 4308 4309 4310 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); 4311 4312 cpu = get_cpu(); 4313 vmx->loaded_vmcs = &vmx->nested.vmcs02; 4314 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01); 4315 4316 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4317 4318 vmx->loaded_vmcs = &vmx->vmcs01; 4319 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); 4320 put_cpu(); 4321 } 4322 4323 /* 4324 * Update the guest state fields of vmcs12 to reflect changes that 4325 * occurred while L2 was running. (The "IA-32e mode guest" bit of the 4326 * VM-entry controls is also updated, since this is really a guest 4327 * state bit.) 4328 */ 4329 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 4330 { 4331 struct vcpu_vmx *vmx = to_vmx(vcpu); 4332 4333 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 4334 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4335 4336 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = 4337 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr); 4338 4339 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 4340 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); 4341 4342 vmcs12->guest_rsp = kvm_rsp_read(vcpu); 4343 vmcs12->guest_rip = kvm_rip_read(vcpu); 4344 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); 4345 4346 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); 4347 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); 4348 4349 vmcs12->guest_interruptibility_info = 4350 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 4351 4352 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 4353 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; 4354 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 4355 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI; 4356 else 4357 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; 4358 4359 if (nested_cpu_has_preemption_timer(vmcs12) && 4360 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER && 4361 !vmx->nested.nested_run_pending) 4362 vmcs12->vmx_preemption_timer_value = 4363 vmx_get_preemption_timer_value(vcpu); 4364 4365 /* 4366 * In some cases (usually, nested EPT), L2 is allowed to change its 4367 * own CR3 without exiting. If it has changed it, we must keep it. 4368 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined 4369 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. 4370 * 4371 * Additionally, restore L2's PDPTR to vmcs12. 4372 */ 4373 if (enable_ept) { 4374 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); 4375 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 4376 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); 4377 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); 4378 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); 4379 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); 4380 } 4381 } 4382 4383 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); 4384 4385 if (nested_cpu_has_vid(vmcs12)) 4386 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); 4387 4388 vmcs12->vm_entry_controls = 4389 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | 4390 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); 4391 4392 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) 4393 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); 4394 4395 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) 4396 vmcs12->guest_ia32_efer = vcpu->arch.efer; 4397 } 4398 4399 /* 4400 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits 4401 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), 4402 * and this function updates it to reflect the changes to the guest state while 4403 * L2 was running (and perhaps made some exits which were handled directly by L0 4404 * without going back to L1), and to reflect the exit reason. 4405 * Note that we do not have to copy here all VMCS fields, just those that 4406 * could have changed by the L2 guest or the exit - i.e., the guest-state and 4407 * exit-information fields only. Other fields are modified by L1 with VMWRITE, 4408 * which already writes to vmcs12 directly. 4409 */ 4410 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 4411 u32 vm_exit_reason, u32 exit_intr_info, 4412 unsigned long exit_qualification) 4413 { 4414 /* update exit information fields: */ 4415 vmcs12->vm_exit_reason = vm_exit_reason; 4416 if (to_vmx(vcpu)->exit_reason.enclave_mode) 4417 vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE; 4418 vmcs12->exit_qualification = exit_qualification; 4419 4420 /* 4421 * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched 4422 * and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other 4423 * exit info fields are unmodified. 4424 */ 4425 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { 4426 vmcs12->launch_state = 1; 4427 4428 /* vm_entry_intr_info_field is cleared on exit. Emulate this 4429 * instead of reading the real value. */ 4430 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; 4431 4432 /* 4433 * Transfer the event that L0 or L1 may wanted to inject into 4434 * L2 to IDT_VECTORING_INFO_FIELD. 4435 */ 4436 vmcs12_save_pending_event(vcpu, vmcs12, 4437 vm_exit_reason, exit_intr_info); 4438 4439 vmcs12->vm_exit_intr_info = exit_intr_info; 4440 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4441 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4442 4443 /* 4444 * According to spec, there's no need to store the guest's 4445 * MSRs if the exit is due to a VM-entry failure that occurs 4446 * during or after loading the guest state. Since this exit 4447 * does not fall in that category, we need to save the MSRs. 4448 */ 4449 if (nested_vmx_store_msr(vcpu, 4450 vmcs12->vm_exit_msr_store_addr, 4451 vmcs12->vm_exit_msr_store_count)) 4452 nested_vmx_abort(vcpu, 4453 VMX_ABORT_SAVE_GUEST_MSR_FAIL); 4454 } 4455 } 4456 4457 /* 4458 * A part of what we need to when the nested L2 guest exits and we want to 4459 * run its L1 parent, is to reset L1's guest state to the host state specified 4460 * in vmcs12. 4461 * This function is to be called not only on normal nested exit, but also on 4462 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry 4463 * Failures During or After Loading Guest State"). 4464 * This function should be called when the active VMCS is L1's (vmcs01). 4465 */ 4466 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 4467 struct vmcs12 *vmcs12) 4468 { 4469 enum vm_entry_failure_code ignored; 4470 struct kvm_segment seg; 4471 4472 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) 4473 vcpu->arch.efer = vmcs12->host_ia32_efer; 4474 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4475 vcpu->arch.efer |= (EFER_LMA | EFER_LME); 4476 else 4477 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); 4478 vmx_set_efer(vcpu, vcpu->arch.efer); 4479 4480 kvm_rsp_write(vcpu, vmcs12->host_rsp); 4481 kvm_rip_write(vcpu, vmcs12->host_rip); 4482 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); 4483 vmx_set_interrupt_shadow(vcpu, 0); 4484 4485 /* 4486 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't 4487 * actually changed, because vmx_set_cr0 refers to efer set above. 4488 * 4489 * CR0_GUEST_HOST_MASK is already set in the original vmcs01 4490 * (KVM doesn't change it); 4491 */ 4492 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); 4493 vmx_set_cr0(vcpu, vmcs12->host_cr0); 4494 4495 /* Same as above - no reason to call set_cr4_guest_host_mask(). */ 4496 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4497 vmx_set_cr4(vcpu, vmcs12->host_cr4); 4498 4499 nested_ept_uninit_mmu_context(vcpu); 4500 4501 /* 4502 * Only PDPTE load can fail as the value of cr3 was checked on entry and 4503 * couldn't have changed. 4504 */ 4505 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored)) 4506 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); 4507 4508 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false); 4509 4510 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); 4511 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); 4512 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); 4513 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); 4514 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); 4515 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); 4516 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); 4517 4518 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ 4519 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) 4520 vmcs_write64(GUEST_BNDCFGS, 0); 4521 4522 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { 4523 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); 4524 vcpu->arch.pat = vmcs12->host_ia32_pat; 4525 } 4526 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && 4527 kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu))) 4528 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 4529 vmcs12->host_ia32_perf_global_ctrl)); 4530 4531 /* Set L1 segment info according to Intel SDM 4532 27.5.2 Loading Host Segment and Descriptor-Table Registers */ 4533 seg = (struct kvm_segment) { 4534 .base = 0, 4535 .limit = 0xFFFFFFFF, 4536 .selector = vmcs12->host_cs_selector, 4537 .type = 11, 4538 .present = 1, 4539 .s = 1, 4540 .g = 1 4541 }; 4542 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4543 seg.l = 1; 4544 else 4545 seg.db = 1; 4546 __vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); 4547 seg = (struct kvm_segment) { 4548 .base = 0, 4549 .limit = 0xFFFFFFFF, 4550 .type = 3, 4551 .present = 1, 4552 .s = 1, 4553 .db = 1, 4554 .g = 1 4555 }; 4556 seg.selector = vmcs12->host_ds_selector; 4557 __vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); 4558 seg.selector = vmcs12->host_es_selector; 4559 __vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); 4560 seg.selector = vmcs12->host_ss_selector; 4561 __vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); 4562 seg.selector = vmcs12->host_fs_selector; 4563 seg.base = vmcs12->host_fs_base; 4564 __vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); 4565 seg.selector = vmcs12->host_gs_selector; 4566 seg.base = vmcs12->host_gs_base; 4567 __vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); 4568 seg = (struct kvm_segment) { 4569 .base = vmcs12->host_tr_base, 4570 .limit = 0x67, 4571 .selector = vmcs12->host_tr_selector, 4572 .type = 11, 4573 .present = 1 4574 }; 4575 __vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); 4576 4577 memset(&seg, 0, sizeof(seg)); 4578 seg.unusable = 1; 4579 __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR); 4580 4581 kvm_set_dr(vcpu, 7, 0x400); 4582 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4583 4584 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 4585 vmcs12->vm_exit_msr_load_count)) 4586 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4587 4588 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu); 4589 } 4590 4591 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) 4592 { 4593 struct vmx_uret_msr *efer_msr; 4594 unsigned int i; 4595 4596 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) 4597 return vmcs_read64(GUEST_IA32_EFER); 4598 4599 if (cpu_has_load_ia32_efer()) 4600 return host_efer; 4601 4602 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { 4603 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) 4604 return vmx->msr_autoload.guest.val[i].value; 4605 } 4606 4607 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER); 4608 if (efer_msr) 4609 return efer_msr->data; 4610 4611 return host_efer; 4612 } 4613 4614 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) 4615 { 4616 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4617 struct vcpu_vmx *vmx = to_vmx(vcpu); 4618 struct vmx_msr_entry g, h; 4619 gpa_t gpa; 4620 u32 i, j; 4621 4622 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); 4623 4624 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 4625 /* 4626 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set 4627 * as vmcs01.GUEST_DR7 contains a userspace defined value 4628 * and vcpu->arch.dr7 is not squirreled away before the 4629 * nested VMENTER (not worth adding a variable in nested_vmx). 4630 */ 4631 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 4632 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 4633 else 4634 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); 4635 } 4636 4637 /* 4638 * Note that calling vmx_set_{efer,cr0,cr4} is important as they 4639 * handle a variety of side effects to KVM's software model. 4640 */ 4641 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); 4642 4643 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); 4644 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); 4645 4646 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4647 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); 4648 4649 nested_ept_uninit_mmu_context(vcpu); 4650 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 4651 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 4652 4653 /* 4654 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs 4655 * from vmcs01 (if necessary). The PDPTRs are not loaded on 4656 * VMFail, like everything else we just need to ensure our 4657 * software model is up-to-date. 4658 */ 4659 if (enable_ept && is_pae_paging(vcpu)) 4660 ept_save_pdptrs(vcpu); 4661 4662 kvm_mmu_reset_context(vcpu); 4663 4664 /* 4665 * This nasty bit of open coding is a compromise between blindly 4666 * loading L1's MSRs using the exit load lists (incorrect emulation 4667 * of VMFail), leaving the nested VM's MSRs in the software model 4668 * (incorrect behavior) and snapshotting the modified MSRs (too 4669 * expensive since the lists are unbound by hardware). For each 4670 * MSR that was (prematurely) loaded from the nested VMEntry load 4671 * list, reload it from the exit load list if it exists and differs 4672 * from the guest value. The intent is to stuff host state as 4673 * silently as possible, not to fully process the exit load list. 4674 */ 4675 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { 4676 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); 4677 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { 4678 pr_debug_ratelimited( 4679 "%s read MSR index failed (%u, 0x%08llx)\n", 4680 __func__, i, gpa); 4681 goto vmabort; 4682 } 4683 4684 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { 4685 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); 4686 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { 4687 pr_debug_ratelimited( 4688 "%s read MSR failed (%u, 0x%08llx)\n", 4689 __func__, j, gpa); 4690 goto vmabort; 4691 } 4692 if (h.index != g.index) 4693 continue; 4694 if (h.value == g.value) 4695 break; 4696 4697 if (nested_vmx_load_msr_check(vcpu, &h)) { 4698 pr_debug_ratelimited( 4699 "%s check failed (%u, 0x%x, 0x%x)\n", 4700 __func__, j, h.index, h.reserved); 4701 goto vmabort; 4702 } 4703 4704 if (kvm_set_msr(vcpu, h.index, h.value)) { 4705 pr_debug_ratelimited( 4706 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", 4707 __func__, j, h.index, h.value); 4708 goto vmabort; 4709 } 4710 } 4711 } 4712 4713 return; 4714 4715 vmabort: 4716 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4717 } 4718 4719 /* 4720 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 4721 * and modify vmcs12 to make it see what it would expect to see there if 4722 * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) 4723 */ 4724 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, 4725 u32 exit_intr_info, unsigned long exit_qualification) 4726 { 4727 struct vcpu_vmx *vmx = to_vmx(vcpu); 4728 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4729 4730 /* Pending MTF traps are discarded on VM-Exit. */ 4731 vmx->nested.mtf_pending = false; 4732 4733 /* trying to cancel vmlaunch/vmresume is a bug */ 4734 WARN_ON_ONCE(vmx->nested.nested_run_pending); 4735 4736 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { 4737 /* 4738 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map 4739 * Enlightened VMCS after migration and we still need to 4740 * do that when something is forcing L2->L1 exit prior to 4741 * the first L2 run. 4742 */ 4743 (void)nested_get_evmcs_page(vcpu); 4744 } 4745 4746 /* Service pending TLB flush requests for L2 before switching to L1. */ 4747 kvm_service_local_tlb_flush_requests(vcpu); 4748 4749 /* 4750 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between 4751 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are 4752 * up-to-date before switching to L1. 4753 */ 4754 if (enable_ept && is_pae_paging(vcpu)) 4755 vmx_ept_load_pdptrs(vcpu); 4756 4757 leave_guest_mode(vcpu); 4758 4759 if (nested_cpu_has_preemption_timer(vmcs12)) 4760 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); 4761 4762 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) { 4763 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset; 4764 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING)) 4765 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; 4766 } 4767 4768 if (likely(!vmx->fail)) { 4769 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 4770 4771 if (vm_exit_reason != -1) 4772 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason, 4773 exit_intr_info, exit_qualification); 4774 4775 /* 4776 * Must happen outside of sync_vmcs02_to_vmcs12() as it will 4777 * also be used to capture vmcs12 cache as part of 4778 * capturing nVMX state for snapshot (migration). 4779 * 4780 * Otherwise, this flush will dirty guest memory at a 4781 * point it is already assumed by user-space to be 4782 * immutable. 4783 */ 4784 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); 4785 } else { 4786 /* 4787 * The only expected VM-instruction error is "VM entry with 4788 * invalid control field(s)." Anything else indicates a 4789 * problem with L0. And we should never get here with a 4790 * VMFail of any type if early consistency checks are enabled. 4791 */ 4792 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 4793 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4794 WARN_ON_ONCE(nested_early_check); 4795 } 4796 4797 /* 4798 * Drop events/exceptions that were queued for re-injection to L2 4799 * (picked up via vmx_complete_interrupts()), as well as exceptions 4800 * that were pending for L2. Note, this must NOT be hoisted above 4801 * prepare_vmcs12(), events/exceptions queued for re-injection need to 4802 * be captured in vmcs12 (see vmcs12_save_pending_event()). 4803 */ 4804 vcpu->arch.nmi_injected = false; 4805 kvm_clear_exception_queue(vcpu); 4806 kvm_clear_interrupt_queue(vcpu); 4807 4808 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 4809 4810 /* 4811 * If IBRS is advertised to the vCPU, KVM must flush the indirect 4812 * branch predictors when transitioning from L2 to L1, as L1 expects 4813 * hardware (KVM in this case) to provide separate predictor modes. 4814 * Bare metal isolates VMX root (host) from VMX non-root (guest), but 4815 * doesn't isolate different VMCSs, i.e. in this case, doesn't provide 4816 * separate modes for L2 vs L1. 4817 */ 4818 if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) 4819 indirect_branch_prediction_barrier(); 4820 4821 /* Update any VMCS fields that might have changed while L2 ran */ 4822 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 4823 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 4824 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 4825 if (kvm_caps.has_tsc_control) 4826 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); 4827 4828 if (vmx->nested.l1_tpr_threshold != -1) 4829 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); 4830 4831 if (vmx->nested.change_vmcs01_virtual_apic_mode) { 4832 vmx->nested.change_vmcs01_virtual_apic_mode = false; 4833 vmx_set_virtual_apic_mode(vcpu); 4834 } 4835 4836 if (vmx->nested.update_vmcs01_cpu_dirty_logging) { 4837 vmx->nested.update_vmcs01_cpu_dirty_logging = false; 4838 vmx_update_cpu_dirty_logging(vcpu); 4839 } 4840 4841 /* Unpin physical memory we referred to in vmcs02 */ 4842 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false); 4843 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); 4844 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); 4845 vmx->nested.pi_desc = NULL; 4846 4847 if (vmx->nested.reload_vmcs01_apic_access_page) { 4848 vmx->nested.reload_vmcs01_apic_access_page = false; 4849 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 4850 } 4851 4852 if (vmx->nested.update_vmcs01_apicv_status) { 4853 vmx->nested.update_vmcs01_apicv_status = false; 4854 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); 4855 } 4856 4857 if ((vm_exit_reason != -1) && 4858 (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))) 4859 vmx->nested.need_vmcs12_to_shadow_sync = true; 4860 4861 /* in case we halted in L2 */ 4862 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 4863 4864 if (likely(!vmx->fail)) { 4865 if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && 4866 nested_exit_intr_ack_set(vcpu)) { 4867 int irq = kvm_cpu_get_interrupt(vcpu); 4868 WARN_ON(irq < 0); 4869 vmcs12->vm_exit_intr_info = irq | 4870 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; 4871 } 4872 4873 if (vm_exit_reason != -1) 4874 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, 4875 vmcs12->exit_qualification, 4876 vmcs12->idt_vectoring_info_field, 4877 vmcs12->vm_exit_intr_info, 4878 vmcs12->vm_exit_intr_error_code, 4879 KVM_ISA_VMX); 4880 4881 load_vmcs12_host_state(vcpu, vmcs12); 4882 4883 return; 4884 } 4885 4886 /* 4887 * After an early L2 VM-entry failure, we're now back 4888 * in L1 which thinks it just finished a VMLAUNCH or 4889 * VMRESUME instruction, so we need to set the failure 4890 * flag and the VM-instruction error field of the VMCS 4891 * accordingly, and skip the emulated instruction. 4892 */ 4893 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4894 4895 /* 4896 * Restore L1's host state to KVM's software model. We're here 4897 * because a consistency check was caught by hardware, which 4898 * means some amount of guest state has been propagated to KVM's 4899 * model and needs to be unwound to the host's state. 4900 */ 4901 nested_vmx_restore_host_state(vcpu); 4902 4903 vmx->fail = 0; 4904 } 4905 4906 static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu) 4907 { 4908 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu); 4909 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); 4910 } 4911 4912 /* 4913 * Decode the memory-address operand of a vmx instruction, as recorded on an 4914 * exit caused by such an instruction (run by a guest hypervisor). 4915 * On success, returns 0. When the operand is invalid, returns 1 and throws 4916 * #UD, #GP, or #SS. 4917 */ 4918 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, 4919 u32 vmx_instruction_info, bool wr, int len, gva_t *ret) 4920 { 4921 gva_t off; 4922 bool exn; 4923 struct kvm_segment s; 4924 4925 /* 4926 * According to Vol. 3B, "Information for VM Exits Due to Instruction 4927 * Execution", on an exit, vmx_instruction_info holds most of the 4928 * addressing components of the operand. Only the displacement part 4929 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). 4930 * For how an actual address is calculated from all these components, 4931 * refer to Vol. 1, "Operand Addressing". 4932 */ 4933 int scaling = vmx_instruction_info & 3; 4934 int addr_size = (vmx_instruction_info >> 7) & 7; 4935 bool is_reg = vmx_instruction_info & (1u << 10); 4936 int seg_reg = (vmx_instruction_info >> 15) & 7; 4937 int index_reg = (vmx_instruction_info >> 18) & 0xf; 4938 bool index_is_valid = !(vmx_instruction_info & (1u << 22)); 4939 int base_reg = (vmx_instruction_info >> 23) & 0xf; 4940 bool base_is_valid = !(vmx_instruction_info & (1u << 27)); 4941 4942 if (is_reg) { 4943 kvm_queue_exception(vcpu, UD_VECTOR); 4944 return 1; 4945 } 4946 4947 /* Addr = segment_base + offset */ 4948 /* offset = base + [index * scale] + displacement */ 4949 off = exit_qualification; /* holds the displacement */ 4950 if (addr_size == 1) 4951 off = (gva_t)sign_extend64(off, 31); 4952 else if (addr_size == 0) 4953 off = (gva_t)sign_extend64(off, 15); 4954 if (base_is_valid) 4955 off += kvm_register_read(vcpu, base_reg); 4956 if (index_is_valid) 4957 off += kvm_register_read(vcpu, index_reg) << scaling; 4958 vmx_get_segment(vcpu, &s, seg_reg); 4959 4960 /* 4961 * The effective address, i.e. @off, of a memory operand is truncated 4962 * based on the address size of the instruction. Note that this is 4963 * the *effective address*, i.e. the address prior to accounting for 4964 * the segment's base. 4965 */ 4966 if (addr_size == 1) /* 32 bit */ 4967 off &= 0xffffffff; 4968 else if (addr_size == 0) /* 16 bit */ 4969 off &= 0xffff; 4970 4971 /* Checks for #GP/#SS exceptions. */ 4972 exn = false; 4973 if (is_long_mode(vcpu)) { 4974 /* 4975 * The virtual/linear address is never truncated in 64-bit 4976 * mode, e.g. a 32-bit address size can yield a 64-bit virtual 4977 * address when using FS/GS with a non-zero base. 4978 */ 4979 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS) 4980 *ret = s.base + off; 4981 else 4982 *ret = off; 4983 4984 /* Long mode: #GP(0)/#SS(0) if the memory address is in a 4985 * non-canonical form. This is the only check on the memory 4986 * destination for long mode! 4987 */ 4988 exn = is_noncanonical_address(*ret, vcpu); 4989 } else { 4990 /* 4991 * When not in long mode, the virtual/linear address is 4992 * unconditionally truncated to 32 bits regardless of the 4993 * address size. 4994 */ 4995 *ret = (s.base + off) & 0xffffffff; 4996 4997 /* Protected mode: apply checks for segment validity in the 4998 * following order: 4999 * - segment type check (#GP(0) may be thrown) 5000 * - usability check (#GP(0)/#SS(0)) 5001 * - limit check (#GP(0)/#SS(0)) 5002 */ 5003 if (wr) 5004 /* #GP(0) if the destination operand is located in a 5005 * read-only data segment or any code segment. 5006 */ 5007 exn = ((s.type & 0xa) == 0 || (s.type & 8)); 5008 else 5009 /* #GP(0) if the source operand is located in an 5010 * execute-only code segment 5011 */ 5012 exn = ((s.type & 0xa) == 8); 5013 if (exn) { 5014 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 5015 return 1; 5016 } 5017 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 5018 */ 5019 exn = (s.unusable != 0); 5020 5021 /* 5022 * Protected mode: #GP(0)/#SS(0) if the memory operand is 5023 * outside the segment limit. All CPUs that support VMX ignore 5024 * limit checks for flat segments, i.e. segments with base==0, 5025 * limit==0xffffffff and of type expand-up data or code. 5026 */ 5027 if (!(s.base == 0 && s.limit == 0xffffffff && 5028 ((s.type & 8) || !(s.type & 4)))) 5029 exn = exn || ((u64)off + len - 1 > s.limit); 5030 } 5031 if (exn) { 5032 kvm_queue_exception_e(vcpu, 5033 seg_reg == VCPU_SREG_SS ? 5034 SS_VECTOR : GP_VECTOR, 5035 0); 5036 return 1; 5037 } 5038 5039 return 0; 5040 } 5041 5042 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer, 5043 int *ret) 5044 { 5045 gva_t gva; 5046 struct x86_exception e; 5047 int r; 5048 5049 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5050 vmcs_read32(VMX_INSTRUCTION_INFO), false, 5051 sizeof(*vmpointer), &gva)) { 5052 *ret = 1; 5053 return -EINVAL; 5054 } 5055 5056 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e); 5057 if (r != X86EMUL_CONTINUE) { 5058 *ret = kvm_handle_memory_failure(vcpu, r, &e); 5059 return -EINVAL; 5060 } 5061 5062 return 0; 5063 } 5064 5065 /* 5066 * Allocate a shadow VMCS and associate it with the currently loaded 5067 * VMCS, unless such a shadow VMCS already exists. The newly allocated 5068 * VMCS is also VMCLEARed, so that it is ready for use. 5069 */ 5070 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) 5071 { 5072 struct vcpu_vmx *vmx = to_vmx(vcpu); 5073 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; 5074 5075 /* 5076 * KVM allocates a shadow VMCS only when L1 executes VMXON and frees it 5077 * when L1 executes VMXOFF or the vCPU is forced out of nested 5078 * operation. VMXON faults if the CPU is already post-VMXON, so it 5079 * should be impossible to already have an allocated shadow VMCS. KVM 5080 * doesn't support virtualization of VMCS shadowing, so vmcs01 should 5081 * always be the loaded VMCS. 5082 */ 5083 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs)) 5084 return loaded_vmcs->shadow_vmcs; 5085 5086 loaded_vmcs->shadow_vmcs = alloc_vmcs(true); 5087 if (loaded_vmcs->shadow_vmcs) 5088 vmcs_clear(loaded_vmcs->shadow_vmcs); 5089 5090 return loaded_vmcs->shadow_vmcs; 5091 } 5092 5093 static int enter_vmx_operation(struct kvm_vcpu *vcpu) 5094 { 5095 struct vcpu_vmx *vmx = to_vmx(vcpu); 5096 int r; 5097 5098 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); 5099 if (r < 0) 5100 goto out_vmcs02; 5101 5102 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 5103 if (!vmx->nested.cached_vmcs12) 5104 goto out_cached_vmcs12; 5105 5106 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA; 5107 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 5108 if (!vmx->nested.cached_shadow_vmcs12) 5109 goto out_cached_shadow_vmcs12; 5110 5111 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) 5112 goto out_shadow_vmcs; 5113 5114 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, 5115 HRTIMER_MODE_ABS_PINNED); 5116 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 5117 5118 vmx->nested.vpid02 = allocate_vpid(); 5119 5120 vmx->nested.vmcs02_initialized = false; 5121 vmx->nested.vmxon = true; 5122 5123 if (vmx_pt_mode_is_host_guest()) { 5124 vmx->pt_desc.guest.ctl = 0; 5125 pt_update_intercept_for_msr(vcpu); 5126 } 5127 5128 return 0; 5129 5130 out_shadow_vmcs: 5131 kfree(vmx->nested.cached_shadow_vmcs12); 5132 5133 out_cached_shadow_vmcs12: 5134 kfree(vmx->nested.cached_vmcs12); 5135 5136 out_cached_vmcs12: 5137 free_loaded_vmcs(&vmx->nested.vmcs02); 5138 5139 out_vmcs02: 5140 return -ENOMEM; 5141 } 5142 5143 /* Emulate the VMXON instruction. */ 5144 static int handle_vmxon(struct kvm_vcpu *vcpu) 5145 { 5146 int ret; 5147 gpa_t vmptr; 5148 uint32_t revision; 5149 struct vcpu_vmx *vmx = to_vmx(vcpu); 5150 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED 5151 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; 5152 5153 /* 5154 * Manually check CR4.VMXE checks, KVM must force CR4.VMXE=1 to enter 5155 * the guest and so cannot rely on hardware to perform the check, 5156 * which has higher priority than VM-Exit (see Intel SDM's pseudocode 5157 * for VMXON). 5158 * 5159 * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86 5160 * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't 5161 * force any of the relevant guest state. For a restricted guest, KVM 5162 * does force CR0.PE=1, but only to also force VM86 in order to emulate 5163 * Real Mode, and so there's no need to check CR0.PE manually. 5164 */ 5165 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_VMXE)) { 5166 kvm_queue_exception(vcpu, UD_VECTOR); 5167 return 1; 5168 } 5169 5170 /* 5171 * The CPL is checked for "not in VMX operation" and for "in VMX root", 5172 * and has higher priority than the VM-Fail due to being post-VMXON, 5173 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, 5174 * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits 5175 * from L2 to L1, i.e. there's no need to check for the vCPU being in 5176 * VMX non-root. 5177 * 5178 * Forwarding the VM-Exit unconditionally, i.e. without performing the 5179 * #UD checks (see above), is functionally ok because KVM doesn't allow 5180 * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's 5181 * CR0 or CR4, i.e. it's L2's responsibility to emulate #UDs that are 5182 * missed by hardware due to shadowing CR0 and/or CR4. 5183 */ 5184 if (vmx_get_cpl(vcpu)) { 5185 kvm_inject_gp(vcpu, 0); 5186 return 1; 5187 } 5188 5189 if (vmx->nested.vmxon) 5190 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); 5191 5192 /* 5193 * Invalid CR0/CR4 generates #GP. These checks are performed if and 5194 * only if the vCPU isn't already in VMX operation, i.e. effectively 5195 * have lower priority than the VM-Fail above. 5196 */ 5197 if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || 5198 !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { 5199 kvm_inject_gp(vcpu, 0); 5200 return 1; 5201 } 5202 5203 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) 5204 != VMXON_NEEDED_FEATURES) { 5205 kvm_inject_gp(vcpu, 0); 5206 return 1; 5207 } 5208 5209 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret)) 5210 return ret; 5211 5212 /* 5213 * SDM 3: 24.11.5 5214 * The first 4 bytes of VMXON region contain the supported 5215 * VMCS revision identifier 5216 * 5217 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; 5218 * which replaces physical address width with 32 5219 */ 5220 if (!page_address_valid(vcpu, vmptr)) 5221 return nested_vmx_failInvalid(vcpu); 5222 5223 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) || 5224 revision != VMCS12_REVISION) 5225 return nested_vmx_failInvalid(vcpu); 5226 5227 vmx->nested.vmxon_ptr = vmptr; 5228 ret = enter_vmx_operation(vcpu); 5229 if (ret) 5230 return ret; 5231 5232 return nested_vmx_succeed(vcpu); 5233 } 5234 5235 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) 5236 { 5237 struct vcpu_vmx *vmx = to_vmx(vcpu); 5238 5239 if (vmx->nested.current_vmptr == INVALID_GPA) 5240 return; 5241 5242 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 5243 5244 if (enable_shadow_vmcs) { 5245 /* copy to memory all shadowed fields in case 5246 they were modified */ 5247 copy_shadow_to_vmcs12(vmx); 5248 vmx_disable_shadow_vmcs(vmx); 5249 } 5250 vmx->nested.posted_intr_nv = -1; 5251 5252 /* Flush VMCS12 to guest memory */ 5253 kvm_vcpu_write_guest_page(vcpu, 5254 vmx->nested.current_vmptr >> PAGE_SHIFT, 5255 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); 5256 5257 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 5258 5259 vmx->nested.current_vmptr = INVALID_GPA; 5260 } 5261 5262 /* Emulate the VMXOFF instruction */ 5263 static int handle_vmxoff(struct kvm_vcpu *vcpu) 5264 { 5265 if (!nested_vmx_check_permission(vcpu)) 5266 return 1; 5267 5268 free_nested(vcpu); 5269 5270 if (kvm_apic_has_pending_init_or_sipi(vcpu)) 5271 kvm_make_request(KVM_REQ_EVENT, vcpu); 5272 5273 return nested_vmx_succeed(vcpu); 5274 } 5275 5276 /* Emulate the VMCLEAR instruction */ 5277 static int handle_vmclear(struct kvm_vcpu *vcpu) 5278 { 5279 struct vcpu_vmx *vmx = to_vmx(vcpu); 5280 u32 zero = 0; 5281 gpa_t vmptr; 5282 int r; 5283 5284 if (!nested_vmx_check_permission(vcpu)) 5285 return 1; 5286 5287 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5288 return r; 5289 5290 if (!page_address_valid(vcpu, vmptr)) 5291 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); 5292 5293 if (vmptr == vmx->nested.vmxon_ptr) 5294 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); 5295 5296 /* 5297 * When Enlightened VMEntry is enabled on the calling CPU we treat 5298 * memory area pointer by vmptr as Enlightened VMCS (as there's no good 5299 * way to distinguish it from VMCS12) and we must not corrupt it by 5300 * writing to the non-existent 'launch_state' field. The area doesn't 5301 * have to be the currently active EVMCS on the calling CPU and there's 5302 * nothing KVM has to do to transition it from 'active' to 'non-active' 5303 * state. It is possible that the area will stay mapped as 5304 * vmx->nested.hv_evmcs but this shouldn't be a problem. 5305 */ 5306 if (likely(!guest_cpuid_has_evmcs(vcpu) || 5307 !evmptr_is_valid(nested_get_evmptr(vcpu)))) { 5308 if (vmptr == vmx->nested.current_vmptr) 5309 nested_release_vmcs12(vcpu); 5310 5311 /* 5312 * Silently ignore memory errors on VMCLEAR, Intel's pseudocode 5313 * for VMCLEAR includes a "ensure that data for VMCS referenced 5314 * by the operand is in memory" clause that guards writes to 5315 * memory, i.e. doing nothing for I/O is architecturally valid. 5316 * 5317 * FIXME: Suppress failures if and only if no memslot is found, 5318 * i.e. exit to userspace if __copy_to_user() fails. 5319 */ 5320 (void)kvm_vcpu_write_guest(vcpu, 5321 vmptr + offsetof(struct vmcs12, 5322 launch_state), 5323 &zero, sizeof(zero)); 5324 } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) { 5325 nested_release_evmcs(vcpu); 5326 } 5327 5328 return nested_vmx_succeed(vcpu); 5329 } 5330 5331 /* Emulate the VMLAUNCH instruction */ 5332 static int handle_vmlaunch(struct kvm_vcpu *vcpu) 5333 { 5334 return nested_vmx_run(vcpu, true); 5335 } 5336 5337 /* Emulate the VMRESUME instruction */ 5338 static int handle_vmresume(struct kvm_vcpu *vcpu) 5339 { 5340 5341 return nested_vmx_run(vcpu, false); 5342 } 5343 5344 static int handle_vmread(struct kvm_vcpu *vcpu) 5345 { 5346 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 5347 : get_vmcs12(vcpu); 5348 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5349 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5350 struct vcpu_vmx *vmx = to_vmx(vcpu); 5351 struct x86_exception e; 5352 unsigned long field; 5353 u64 value; 5354 gva_t gva = 0; 5355 short offset; 5356 int len, r; 5357 5358 if (!nested_vmx_check_permission(vcpu)) 5359 return 1; 5360 5361 /* Decode instruction info and find the field to read */ 5362 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); 5363 5364 if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { 5365 /* 5366 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, 5367 * any VMREAD sets the ALU flags for VMfailInvalid. 5368 */ 5369 if (vmx->nested.current_vmptr == INVALID_GPA || 5370 (is_guest_mode(vcpu) && 5371 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) 5372 return nested_vmx_failInvalid(vcpu); 5373 5374 offset = get_vmcs12_field_offset(field); 5375 if (offset < 0) 5376 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5377 5378 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field)) 5379 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5380 5381 /* Read the field, zero-extended to a u64 value */ 5382 value = vmcs12_read_any(vmcs12, field, offset); 5383 } else { 5384 /* 5385 * Hyper-V TLFS (as of 6.0b) explicitly states, that while an 5386 * enlightened VMCS is active VMREAD/VMWRITE instructions are 5387 * unsupported. Unfortunately, certain versions of Windows 11 5388 * don't comply with this requirement which is not enforced in 5389 * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a 5390 * workaround, as misbehaving guests will panic on VM-Fail. 5391 * Note, enlightened VMCS is incompatible with shadow VMCS so 5392 * all VMREADs from L2 should go to L1. 5393 */ 5394 if (WARN_ON_ONCE(is_guest_mode(vcpu))) 5395 return nested_vmx_failInvalid(vcpu); 5396 5397 offset = evmcs_field_offset(field, NULL); 5398 if (offset < 0) 5399 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5400 5401 /* Read the field, zero-extended to a u64 value */ 5402 value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset); 5403 } 5404 5405 /* 5406 * Now copy part of this value to register or memory, as requested. 5407 * Note that the number of bits actually copied is 32 or 64 depending 5408 * on the guest's mode (32 or 64 bit), not on the given field's length. 5409 */ 5410 if (instr_info & BIT(10)) { 5411 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value); 5412 } else { 5413 len = is_64_bit_mode(vcpu) ? 8 : 4; 5414 if (get_vmx_mem_address(vcpu, exit_qualification, 5415 instr_info, true, len, &gva)) 5416 return 1; 5417 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ 5418 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e); 5419 if (r != X86EMUL_CONTINUE) 5420 return kvm_handle_memory_failure(vcpu, r, &e); 5421 } 5422 5423 return nested_vmx_succeed(vcpu); 5424 } 5425 5426 static bool is_shadow_field_rw(unsigned long field) 5427 { 5428 switch (field) { 5429 #define SHADOW_FIELD_RW(x, y) case x: 5430 #include "vmcs_shadow_fields.h" 5431 return true; 5432 default: 5433 break; 5434 } 5435 return false; 5436 } 5437 5438 static bool is_shadow_field_ro(unsigned long field) 5439 { 5440 switch (field) { 5441 #define SHADOW_FIELD_RO(x, y) case x: 5442 #include "vmcs_shadow_fields.h" 5443 return true; 5444 default: 5445 break; 5446 } 5447 return false; 5448 } 5449 5450 static int handle_vmwrite(struct kvm_vcpu *vcpu) 5451 { 5452 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 5453 : get_vmcs12(vcpu); 5454 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5455 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5456 struct vcpu_vmx *vmx = to_vmx(vcpu); 5457 struct x86_exception e; 5458 unsigned long field; 5459 short offset; 5460 gva_t gva; 5461 int len, r; 5462 5463 /* 5464 * The value to write might be 32 or 64 bits, depending on L1's long 5465 * mode, and eventually we need to write that into a field of several 5466 * possible lengths. The code below first zero-extends the value to 64 5467 * bit (value), and then copies only the appropriate number of 5468 * bits into the vmcs12 field. 5469 */ 5470 u64 value = 0; 5471 5472 if (!nested_vmx_check_permission(vcpu)) 5473 return 1; 5474 5475 /* 5476 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, 5477 * any VMWRITE sets the ALU flags for VMfailInvalid. 5478 */ 5479 if (vmx->nested.current_vmptr == INVALID_GPA || 5480 (is_guest_mode(vcpu) && 5481 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) 5482 return nested_vmx_failInvalid(vcpu); 5483 5484 if (instr_info & BIT(10)) 5485 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf)); 5486 else { 5487 len = is_64_bit_mode(vcpu) ? 8 : 4; 5488 if (get_vmx_mem_address(vcpu, exit_qualification, 5489 instr_info, false, len, &gva)) 5490 return 1; 5491 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e); 5492 if (r != X86EMUL_CONTINUE) 5493 return kvm_handle_memory_failure(vcpu, r, &e); 5494 } 5495 5496 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); 5497 5498 offset = get_vmcs12_field_offset(field); 5499 if (offset < 0) 5500 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5501 5502 /* 5503 * If the vCPU supports "VMWRITE to any supported field in the 5504 * VMCS," then the "read-only" fields are actually read/write. 5505 */ 5506 if (vmcs_field_readonly(field) && 5507 !nested_cpu_has_vmwrite_any_field(vcpu)) 5508 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); 5509 5510 /* 5511 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties 5512 * vmcs12, else we may crush a field or consume a stale value. 5513 */ 5514 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) 5515 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5516 5517 /* 5518 * Some Intel CPUs intentionally drop the reserved bits of the AR byte 5519 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM 5520 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE 5521 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD 5522 * from L1 will return a different value than VMREAD from L2 (L1 sees 5523 * the stripped down value, L2 sees the full value as stored by KVM). 5524 */ 5525 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES) 5526 value &= 0x1f0ff; 5527 5528 vmcs12_write_any(vmcs12, field, offset, value); 5529 5530 /* 5531 * Do not track vmcs12 dirty-state if in guest-mode as we actually 5532 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated 5533 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't 5534 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path. 5535 */ 5536 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) { 5537 /* 5538 * L1 can read these fields without exiting, ensure the 5539 * shadow VMCS is up-to-date. 5540 */ 5541 if (enable_shadow_vmcs && is_shadow_field_ro(field)) { 5542 preempt_disable(); 5543 vmcs_load(vmx->vmcs01.shadow_vmcs); 5544 5545 __vmcs_writel(field, value); 5546 5547 vmcs_clear(vmx->vmcs01.shadow_vmcs); 5548 vmcs_load(vmx->loaded_vmcs->vmcs); 5549 preempt_enable(); 5550 } 5551 vmx->nested.dirty_vmcs12 = true; 5552 } 5553 5554 return nested_vmx_succeed(vcpu); 5555 } 5556 5557 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) 5558 { 5559 vmx->nested.current_vmptr = vmptr; 5560 if (enable_shadow_vmcs) { 5561 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 5562 vmcs_write64(VMCS_LINK_POINTER, 5563 __pa(vmx->vmcs01.shadow_vmcs)); 5564 vmx->nested.need_vmcs12_to_shadow_sync = true; 5565 } 5566 vmx->nested.dirty_vmcs12 = true; 5567 vmx->nested.force_msr_bitmap_recalc = true; 5568 } 5569 5570 /* Emulate the VMPTRLD instruction */ 5571 static int handle_vmptrld(struct kvm_vcpu *vcpu) 5572 { 5573 struct vcpu_vmx *vmx = to_vmx(vcpu); 5574 gpa_t vmptr; 5575 int r; 5576 5577 if (!nested_vmx_check_permission(vcpu)) 5578 return 1; 5579 5580 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5581 return r; 5582 5583 if (!page_address_valid(vcpu, vmptr)) 5584 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); 5585 5586 if (vmptr == vmx->nested.vmxon_ptr) 5587 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); 5588 5589 /* Forbid normal VMPTRLD if Enlightened version was used */ 5590 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 5591 return 1; 5592 5593 if (vmx->nested.current_vmptr != vmptr) { 5594 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache; 5595 struct vmcs_hdr hdr; 5596 5597 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) { 5598 /* 5599 * Reads from an unbacked page return all 1s, 5600 * which means that the 32 bits located at the 5601 * given physical address won't match the required 5602 * VMCS12_REVISION identifier. 5603 */ 5604 return nested_vmx_fail(vcpu, 5605 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5606 } 5607 5608 if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, 5609 offsetof(struct vmcs12, hdr), 5610 sizeof(hdr))) { 5611 return nested_vmx_fail(vcpu, 5612 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5613 } 5614 5615 if (hdr.revision_id != VMCS12_REVISION || 5616 (hdr.shadow_vmcs && 5617 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { 5618 return nested_vmx_fail(vcpu, 5619 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5620 } 5621 5622 nested_release_vmcs12(vcpu); 5623 5624 /* 5625 * Load VMCS12 from guest memory since it is not already 5626 * cached. 5627 */ 5628 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12, 5629 VMCS12_SIZE)) { 5630 return nested_vmx_fail(vcpu, 5631 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5632 } 5633 5634 set_current_vmptr(vmx, vmptr); 5635 } 5636 5637 return nested_vmx_succeed(vcpu); 5638 } 5639 5640 /* Emulate the VMPTRST instruction */ 5641 static int handle_vmptrst(struct kvm_vcpu *vcpu) 5642 { 5643 unsigned long exit_qual = vmx_get_exit_qual(vcpu); 5644 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5645 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; 5646 struct x86_exception e; 5647 gva_t gva; 5648 int r; 5649 5650 if (!nested_vmx_check_permission(vcpu)) 5651 return 1; 5652 5653 if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr))) 5654 return 1; 5655 5656 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, 5657 true, sizeof(gpa_t), &gva)) 5658 return 1; 5659 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ 5660 r = kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, 5661 sizeof(gpa_t), &e); 5662 if (r != X86EMUL_CONTINUE) 5663 return kvm_handle_memory_failure(vcpu, r, &e); 5664 5665 return nested_vmx_succeed(vcpu); 5666 } 5667 5668 /* Emulate the INVEPT instruction */ 5669 static int handle_invept(struct kvm_vcpu *vcpu) 5670 { 5671 struct vcpu_vmx *vmx = to_vmx(vcpu); 5672 u32 vmx_instruction_info, types; 5673 unsigned long type, roots_to_free; 5674 struct kvm_mmu *mmu; 5675 gva_t gva; 5676 struct x86_exception e; 5677 struct { 5678 u64 eptp, gpa; 5679 } operand; 5680 int i, r, gpr_index; 5681 5682 if (!(vmx->nested.msrs.secondary_ctls_high & 5683 SECONDARY_EXEC_ENABLE_EPT) || 5684 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { 5685 kvm_queue_exception(vcpu, UD_VECTOR); 5686 return 1; 5687 } 5688 5689 if (!nested_vmx_check_permission(vcpu)) 5690 return 1; 5691 5692 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5693 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info); 5694 type = kvm_register_read(vcpu, gpr_index); 5695 5696 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 5697 5698 if (type >= 32 || !(types & (1 << type))) 5699 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5700 5701 /* According to the Intel VMX instruction reference, the memory 5702 * operand is read even if it isn't needed (e.g., for type==global) 5703 */ 5704 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5705 vmx_instruction_info, false, sizeof(operand), &gva)) 5706 return 1; 5707 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5708 if (r != X86EMUL_CONTINUE) 5709 return kvm_handle_memory_failure(vcpu, r, &e); 5710 5711 /* 5712 * Nested EPT roots are always held through guest_mmu, 5713 * not root_mmu. 5714 */ 5715 mmu = &vcpu->arch.guest_mmu; 5716 5717 switch (type) { 5718 case VMX_EPT_EXTENT_CONTEXT: 5719 if (!nested_vmx_check_eptp(vcpu, operand.eptp)) 5720 return nested_vmx_fail(vcpu, 5721 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5722 5723 roots_to_free = 0; 5724 if (nested_ept_root_matches(mmu->root.hpa, mmu->root.pgd, 5725 operand.eptp)) 5726 roots_to_free |= KVM_MMU_ROOT_CURRENT; 5727 5728 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 5729 if (nested_ept_root_matches(mmu->prev_roots[i].hpa, 5730 mmu->prev_roots[i].pgd, 5731 operand.eptp)) 5732 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 5733 } 5734 break; 5735 case VMX_EPT_EXTENT_GLOBAL: 5736 roots_to_free = KVM_MMU_ROOTS_ALL; 5737 break; 5738 default: 5739 BUG(); 5740 break; 5741 } 5742 5743 if (roots_to_free) 5744 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); 5745 5746 return nested_vmx_succeed(vcpu); 5747 } 5748 5749 static int handle_invvpid(struct kvm_vcpu *vcpu) 5750 { 5751 struct vcpu_vmx *vmx = to_vmx(vcpu); 5752 u32 vmx_instruction_info; 5753 unsigned long type, types; 5754 gva_t gva; 5755 struct x86_exception e; 5756 struct { 5757 u64 vpid; 5758 u64 gla; 5759 } operand; 5760 u16 vpid02; 5761 int r, gpr_index; 5762 5763 if (!(vmx->nested.msrs.secondary_ctls_high & 5764 SECONDARY_EXEC_ENABLE_VPID) || 5765 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { 5766 kvm_queue_exception(vcpu, UD_VECTOR); 5767 return 1; 5768 } 5769 5770 if (!nested_vmx_check_permission(vcpu)) 5771 return 1; 5772 5773 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5774 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info); 5775 type = kvm_register_read(vcpu, gpr_index); 5776 5777 types = (vmx->nested.msrs.vpid_caps & 5778 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; 5779 5780 if (type >= 32 || !(types & (1 << type))) 5781 return nested_vmx_fail(vcpu, 5782 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5783 5784 /* according to the intel vmx instruction reference, the memory 5785 * operand is read even if it isn't needed (e.g., for type==global) 5786 */ 5787 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5788 vmx_instruction_info, false, sizeof(operand), &gva)) 5789 return 1; 5790 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5791 if (r != X86EMUL_CONTINUE) 5792 return kvm_handle_memory_failure(vcpu, r, &e); 5793 5794 if (operand.vpid >> 16) 5795 return nested_vmx_fail(vcpu, 5796 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5797 5798 vpid02 = nested_get_vpid02(vcpu); 5799 switch (type) { 5800 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: 5801 if (!operand.vpid || 5802 is_noncanonical_address(operand.gla, vcpu)) 5803 return nested_vmx_fail(vcpu, 5804 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5805 vpid_sync_vcpu_addr(vpid02, operand.gla); 5806 break; 5807 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 5808 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: 5809 if (!operand.vpid) 5810 return nested_vmx_fail(vcpu, 5811 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5812 vpid_sync_context(vpid02); 5813 break; 5814 case VMX_VPID_EXTENT_ALL_CONTEXT: 5815 vpid_sync_context(vpid02); 5816 break; 5817 default: 5818 WARN_ON_ONCE(1); 5819 return kvm_skip_emulated_instruction(vcpu); 5820 } 5821 5822 /* 5823 * Sync the shadow page tables if EPT is disabled, L1 is invalidating 5824 * linear mappings for L2 (tagged with L2's VPID). Free all guest 5825 * roots as VPIDs are not tracked in the MMU role. 5826 * 5827 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share 5828 * an MMU when EPT is disabled. 5829 * 5830 * TODO: sync only the affected SPTEs for INVDIVIDUAL_ADDR. 5831 */ 5832 if (!enable_ept) 5833 kvm_mmu_free_guest_mode_roots(vcpu->kvm, &vcpu->arch.root_mmu); 5834 5835 return nested_vmx_succeed(vcpu); 5836 } 5837 5838 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, 5839 struct vmcs12 *vmcs12) 5840 { 5841 u32 index = kvm_rcx_read(vcpu); 5842 u64 new_eptp; 5843 5844 if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12))) 5845 return 1; 5846 if (index >= VMFUNC_EPTP_ENTRIES) 5847 return 1; 5848 5849 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, 5850 &new_eptp, index * 8, 8)) 5851 return 1; 5852 5853 /* 5854 * If the (L2) guest does a vmfunc to the currently 5855 * active ept pointer, we don't have to do anything else 5856 */ 5857 if (vmcs12->ept_pointer != new_eptp) { 5858 if (!nested_vmx_check_eptp(vcpu, new_eptp)) 5859 return 1; 5860 5861 vmcs12->ept_pointer = new_eptp; 5862 nested_ept_new_eptp(vcpu); 5863 5864 if (!nested_cpu_has_vpid(vmcs12)) 5865 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 5866 } 5867 5868 return 0; 5869 } 5870 5871 static int handle_vmfunc(struct kvm_vcpu *vcpu) 5872 { 5873 struct vcpu_vmx *vmx = to_vmx(vcpu); 5874 struct vmcs12 *vmcs12; 5875 u32 function = kvm_rax_read(vcpu); 5876 5877 /* 5878 * VMFUNC should never execute cleanly while L1 is active; KVM supports 5879 * VMFUNC for nested VMs, but not for L1. 5880 */ 5881 if (WARN_ON_ONCE(!is_guest_mode(vcpu))) { 5882 kvm_queue_exception(vcpu, UD_VECTOR); 5883 return 1; 5884 } 5885 5886 vmcs12 = get_vmcs12(vcpu); 5887 5888 /* 5889 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC 5890 * is enabled in vmcs02 if and only if it's enabled in vmcs12. 5891 */ 5892 if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) { 5893 kvm_queue_exception(vcpu, UD_VECTOR); 5894 return 1; 5895 } 5896 5897 if (!(vmcs12->vm_function_control & BIT_ULL(function))) 5898 goto fail; 5899 5900 switch (function) { 5901 case 0: 5902 if (nested_vmx_eptp_switching(vcpu, vmcs12)) 5903 goto fail; 5904 break; 5905 default: 5906 goto fail; 5907 } 5908 return kvm_skip_emulated_instruction(vcpu); 5909 5910 fail: 5911 /* 5912 * This is effectively a reflected VM-Exit, as opposed to a synthesized 5913 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode 5914 * EXIT_REASON_VMFUNC as the exit reason. 5915 */ 5916 nested_vmx_vmexit(vcpu, vmx->exit_reason.full, 5917 vmx_get_intr_info(vcpu), 5918 vmx_get_exit_qual(vcpu)); 5919 return 1; 5920 } 5921 5922 /* 5923 * Return true if an IO instruction with the specified port and size should cause 5924 * a VM-exit into L1. 5925 */ 5926 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, 5927 int size) 5928 { 5929 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5930 gpa_t bitmap, last_bitmap; 5931 u8 b; 5932 5933 last_bitmap = INVALID_GPA; 5934 b = -1; 5935 5936 while (size > 0) { 5937 if (port < 0x8000) 5938 bitmap = vmcs12->io_bitmap_a; 5939 else if (port < 0x10000) 5940 bitmap = vmcs12->io_bitmap_b; 5941 else 5942 return true; 5943 bitmap += (port & 0x7fff) / 8; 5944 5945 if (last_bitmap != bitmap) 5946 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) 5947 return true; 5948 if (b & (1 << (port & 7))) 5949 return true; 5950 5951 port++; 5952 size--; 5953 last_bitmap = bitmap; 5954 } 5955 5956 return false; 5957 } 5958 5959 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, 5960 struct vmcs12 *vmcs12) 5961 { 5962 unsigned long exit_qualification; 5963 unsigned short port; 5964 int size; 5965 5966 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 5967 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); 5968 5969 exit_qualification = vmx_get_exit_qual(vcpu); 5970 5971 port = exit_qualification >> 16; 5972 size = (exit_qualification & 7) + 1; 5973 5974 return nested_vmx_check_io_bitmaps(vcpu, port, size); 5975 } 5976 5977 /* 5978 * Return 1 if we should exit from L2 to L1 to handle an MSR access, 5979 * rather than handle it ourselves in L0. I.e., check whether L1 expressed 5980 * disinterest in the current event (read or write a specific MSR) by using an 5981 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 5982 */ 5983 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, 5984 struct vmcs12 *vmcs12, 5985 union vmx_exit_reason exit_reason) 5986 { 5987 u32 msr_index = kvm_rcx_read(vcpu); 5988 gpa_t bitmap; 5989 5990 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 5991 return true; 5992 5993 /* 5994 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, 5995 * for the four combinations of read/write and low/high MSR numbers. 5996 * First we need to figure out which of the four to use: 5997 */ 5998 bitmap = vmcs12->msr_bitmap; 5999 if (exit_reason.basic == EXIT_REASON_MSR_WRITE) 6000 bitmap += 2048; 6001 if (msr_index >= 0xc0000000) { 6002 msr_index -= 0xc0000000; 6003 bitmap += 1024; 6004 } 6005 6006 /* Then read the msr_index'th bit from this bitmap: */ 6007 if (msr_index < 1024*8) { 6008 unsigned char b; 6009 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) 6010 return true; 6011 return 1 & (b >> (msr_index & 7)); 6012 } else 6013 return true; /* let L1 handle the wrong parameter */ 6014 } 6015 6016 /* 6017 * Return 1 if we should exit from L2 to L1 to handle a CR access exit, 6018 * rather than handle it ourselves in L0. I.e., check if L1 wanted to 6019 * intercept (via guest_host_mask etc.) the current event. 6020 */ 6021 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, 6022 struct vmcs12 *vmcs12) 6023 { 6024 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 6025 int cr = exit_qualification & 15; 6026 int reg; 6027 unsigned long val; 6028 6029 switch ((exit_qualification >> 4) & 3) { 6030 case 0: /* mov to cr */ 6031 reg = (exit_qualification >> 8) & 15; 6032 val = kvm_register_read(vcpu, reg); 6033 switch (cr) { 6034 case 0: 6035 if (vmcs12->cr0_guest_host_mask & 6036 (val ^ vmcs12->cr0_read_shadow)) 6037 return true; 6038 break; 6039 case 3: 6040 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) 6041 return true; 6042 break; 6043 case 4: 6044 if (vmcs12->cr4_guest_host_mask & 6045 (vmcs12->cr4_read_shadow ^ val)) 6046 return true; 6047 break; 6048 case 8: 6049 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) 6050 return true; 6051 break; 6052 } 6053 break; 6054 case 2: /* clts */ 6055 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && 6056 (vmcs12->cr0_read_shadow & X86_CR0_TS)) 6057 return true; 6058 break; 6059 case 1: /* mov from cr */ 6060 switch (cr) { 6061 case 3: 6062 if (vmcs12->cpu_based_vm_exec_control & 6063 CPU_BASED_CR3_STORE_EXITING) 6064 return true; 6065 break; 6066 case 8: 6067 if (vmcs12->cpu_based_vm_exec_control & 6068 CPU_BASED_CR8_STORE_EXITING) 6069 return true; 6070 break; 6071 } 6072 break; 6073 case 3: /* lmsw */ 6074 /* 6075 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 6076 * cr0. Other attempted changes are ignored, with no exit. 6077 */ 6078 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 6079 if (vmcs12->cr0_guest_host_mask & 0xe & 6080 (val ^ vmcs12->cr0_read_shadow)) 6081 return true; 6082 if ((vmcs12->cr0_guest_host_mask & 0x1) && 6083 !(vmcs12->cr0_read_shadow & 0x1) && 6084 (val & 0x1)) 6085 return true; 6086 break; 6087 } 6088 return false; 6089 } 6090 6091 static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu, 6092 struct vmcs12 *vmcs12) 6093 { 6094 u32 encls_leaf; 6095 6096 if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) || 6097 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING)) 6098 return false; 6099 6100 encls_leaf = kvm_rax_read(vcpu); 6101 if (encls_leaf > 62) 6102 encls_leaf = 63; 6103 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf); 6104 } 6105 6106 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, 6107 struct vmcs12 *vmcs12, gpa_t bitmap) 6108 { 6109 u32 vmx_instruction_info; 6110 unsigned long field; 6111 u8 b; 6112 6113 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 6114 return true; 6115 6116 /* Decode instruction info and find the field to access */ 6117 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 6118 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 6119 6120 /* Out-of-range fields always cause a VM exit from L2 to L1 */ 6121 if (field >> 15) 6122 return true; 6123 6124 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) 6125 return true; 6126 6127 return 1 & (b >> (field & 7)); 6128 } 6129 6130 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12) 6131 { 6132 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field; 6133 6134 if (nested_cpu_has_mtf(vmcs12)) 6135 return true; 6136 6137 /* 6138 * An MTF VM-exit may be injected into the guest by setting the 6139 * interruption-type to 7 (other event) and the vector field to 0. Such 6140 * is the case regardless of the 'monitor trap flag' VM-execution 6141 * control. 6142 */ 6143 return entry_intr_info == (INTR_INFO_VALID_MASK 6144 | INTR_TYPE_OTHER_EVENT); 6145 } 6146 6147 /* 6148 * Return true if L0 wants to handle an exit from L2 regardless of whether or not 6149 * L1 wants the exit. Only call this when in is_guest_mode (L2). 6150 */ 6151 static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, 6152 union vmx_exit_reason exit_reason) 6153 { 6154 u32 intr_info; 6155 6156 switch ((u16)exit_reason.basic) { 6157 case EXIT_REASON_EXCEPTION_NMI: 6158 intr_info = vmx_get_intr_info(vcpu); 6159 if (is_nmi(intr_info)) 6160 return true; 6161 else if (is_page_fault(intr_info)) 6162 return vcpu->arch.apf.host_apf_flags || 6163 vmx_need_pf_intercept(vcpu); 6164 else if (is_debug(intr_info) && 6165 vcpu->guest_debug & 6166 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 6167 return true; 6168 else if (is_breakpoint(intr_info) && 6169 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 6170 return true; 6171 else if (is_alignment_check(intr_info) && 6172 !vmx_guest_inject_ac(vcpu)) 6173 return true; 6174 return false; 6175 case EXIT_REASON_EXTERNAL_INTERRUPT: 6176 return true; 6177 case EXIT_REASON_MCE_DURING_VMENTRY: 6178 return true; 6179 case EXIT_REASON_EPT_VIOLATION: 6180 /* 6181 * L0 always deals with the EPT violation. If nested EPT is 6182 * used, and the nested mmu code discovers that the address is 6183 * missing in the guest EPT table (EPT12), the EPT violation 6184 * will be injected with nested_ept_inject_page_fault() 6185 */ 6186 return true; 6187 case EXIT_REASON_EPT_MISCONFIG: 6188 /* 6189 * L2 never uses directly L1's EPT, but rather L0's own EPT 6190 * table (shadow on EPT) or a merged EPT table that L0 built 6191 * (EPT on EPT). So any problems with the structure of the 6192 * table is L0's fault. 6193 */ 6194 return true; 6195 case EXIT_REASON_PREEMPTION_TIMER: 6196 return true; 6197 case EXIT_REASON_PML_FULL: 6198 /* 6199 * PML is emulated for an L1 VMM and should never be enabled in 6200 * vmcs02, always "handle" PML_FULL by exiting to userspace. 6201 */ 6202 return true; 6203 case EXIT_REASON_VMFUNC: 6204 /* VM functions are emulated through L2->L0 vmexits. */ 6205 return true; 6206 case EXIT_REASON_BUS_LOCK: 6207 /* 6208 * At present, bus lock VM exit is never exposed to L1. 6209 * Handle L2's bus locks in L0 directly. 6210 */ 6211 return true; 6212 case EXIT_REASON_VMCALL: 6213 /* Hyper-V L2 TLB flush hypercall is handled by L0 */ 6214 return guest_hv_cpuid_has_l2_tlb_flush(vcpu) && 6215 nested_evmcs_l2_tlb_flush_enabled(vcpu) && 6216 kvm_hv_is_tlb_flush_hcall(vcpu); 6217 default: 6218 break; 6219 } 6220 return false; 6221 } 6222 6223 /* 6224 * Return 1 if L1 wants to intercept an exit from L2. Only call this when in 6225 * is_guest_mode (L2). 6226 */ 6227 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, 6228 union vmx_exit_reason exit_reason) 6229 { 6230 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6231 u32 intr_info; 6232 6233 switch ((u16)exit_reason.basic) { 6234 case EXIT_REASON_EXCEPTION_NMI: 6235 intr_info = vmx_get_intr_info(vcpu); 6236 if (is_nmi(intr_info)) 6237 return true; 6238 else if (is_page_fault(intr_info)) 6239 return true; 6240 return vmcs12->exception_bitmap & 6241 (1u << (intr_info & INTR_INFO_VECTOR_MASK)); 6242 case EXIT_REASON_EXTERNAL_INTERRUPT: 6243 return nested_exit_on_intr(vcpu); 6244 case EXIT_REASON_TRIPLE_FAULT: 6245 return true; 6246 case EXIT_REASON_INTERRUPT_WINDOW: 6247 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING); 6248 case EXIT_REASON_NMI_WINDOW: 6249 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING); 6250 case EXIT_REASON_TASK_SWITCH: 6251 return true; 6252 case EXIT_REASON_CPUID: 6253 return true; 6254 case EXIT_REASON_HLT: 6255 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); 6256 case EXIT_REASON_INVD: 6257 return true; 6258 case EXIT_REASON_INVLPG: 6259 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 6260 case EXIT_REASON_RDPMC: 6261 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); 6262 case EXIT_REASON_RDRAND: 6263 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); 6264 case EXIT_REASON_RDSEED: 6265 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); 6266 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: 6267 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); 6268 case EXIT_REASON_VMREAD: 6269 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 6270 vmcs12->vmread_bitmap); 6271 case EXIT_REASON_VMWRITE: 6272 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 6273 vmcs12->vmwrite_bitmap); 6274 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: 6275 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: 6276 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: 6277 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 6278 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: 6279 /* 6280 * VMX instructions trap unconditionally. This allows L1 to 6281 * emulate them for its L2 guest, i.e., allows 3-level nesting! 6282 */ 6283 return true; 6284 case EXIT_REASON_CR_ACCESS: 6285 return nested_vmx_exit_handled_cr(vcpu, vmcs12); 6286 case EXIT_REASON_DR_ACCESS: 6287 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); 6288 case EXIT_REASON_IO_INSTRUCTION: 6289 return nested_vmx_exit_handled_io(vcpu, vmcs12); 6290 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: 6291 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); 6292 case EXIT_REASON_MSR_READ: 6293 case EXIT_REASON_MSR_WRITE: 6294 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); 6295 case EXIT_REASON_INVALID_STATE: 6296 return true; 6297 case EXIT_REASON_MWAIT_INSTRUCTION: 6298 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); 6299 case EXIT_REASON_MONITOR_TRAP_FLAG: 6300 return nested_vmx_exit_handled_mtf(vmcs12); 6301 case EXIT_REASON_MONITOR_INSTRUCTION: 6302 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); 6303 case EXIT_REASON_PAUSE_INSTRUCTION: 6304 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || 6305 nested_cpu_has2(vmcs12, 6306 SECONDARY_EXEC_PAUSE_LOOP_EXITING); 6307 case EXIT_REASON_MCE_DURING_VMENTRY: 6308 return true; 6309 case EXIT_REASON_TPR_BELOW_THRESHOLD: 6310 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); 6311 case EXIT_REASON_APIC_ACCESS: 6312 case EXIT_REASON_APIC_WRITE: 6313 case EXIT_REASON_EOI_INDUCED: 6314 /* 6315 * The controls for "virtualize APIC accesses," "APIC- 6316 * register virtualization," and "virtual-interrupt 6317 * delivery" only come from vmcs12. 6318 */ 6319 return true; 6320 case EXIT_REASON_INVPCID: 6321 return 6322 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && 6323 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 6324 case EXIT_REASON_WBINVD: 6325 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); 6326 case EXIT_REASON_XSETBV: 6327 return true; 6328 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: 6329 /* 6330 * This should never happen, since it is not possible to 6331 * set XSS to a non-zero value---neither in L1 nor in L2. 6332 * If if it were, XSS would have to be checked against 6333 * the XSS exit bitmap in vmcs12. 6334 */ 6335 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES); 6336 case EXIT_REASON_UMWAIT: 6337 case EXIT_REASON_TPAUSE: 6338 return nested_cpu_has2(vmcs12, 6339 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE); 6340 case EXIT_REASON_ENCLS: 6341 return nested_vmx_exit_handled_encls(vcpu, vmcs12); 6342 case EXIT_REASON_NOTIFY: 6343 /* Notify VM exit is not exposed to L1 */ 6344 return false; 6345 default: 6346 return true; 6347 } 6348 } 6349 6350 /* 6351 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was 6352 * reflected into L1. 6353 */ 6354 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) 6355 { 6356 struct vcpu_vmx *vmx = to_vmx(vcpu); 6357 union vmx_exit_reason exit_reason = vmx->exit_reason; 6358 unsigned long exit_qual; 6359 u32 exit_intr_info; 6360 6361 WARN_ON_ONCE(vmx->nested.nested_run_pending); 6362 6363 /* 6364 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM 6365 * has already loaded L2's state. 6366 */ 6367 if (unlikely(vmx->fail)) { 6368 trace_kvm_nested_vmenter_failed( 6369 "hardware VM-instruction error: ", 6370 vmcs_read32(VM_INSTRUCTION_ERROR)); 6371 exit_intr_info = 0; 6372 exit_qual = 0; 6373 goto reflect_vmexit; 6374 } 6375 6376 trace_kvm_nested_vmexit(vcpu, KVM_ISA_VMX); 6377 6378 /* If L0 (KVM) wants the exit, it trumps L1's desires. */ 6379 if (nested_vmx_l0_wants_exit(vcpu, exit_reason)) 6380 return false; 6381 6382 /* If L1 doesn't want the exit, handle it in L0. */ 6383 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason)) 6384 return false; 6385 6386 /* 6387 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For 6388 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would 6389 * need to be synthesized by querying the in-kernel LAPIC, but external 6390 * interrupts are never reflected to L1 so it's a non-issue. 6391 */ 6392 exit_intr_info = vmx_get_intr_info(vcpu); 6393 if (is_exception_with_error_code(exit_intr_info)) { 6394 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6395 6396 vmcs12->vm_exit_intr_error_code = 6397 vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 6398 } 6399 exit_qual = vmx_get_exit_qual(vcpu); 6400 6401 reflect_vmexit: 6402 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual); 6403 return true; 6404 } 6405 6406 static int vmx_get_nested_state(struct kvm_vcpu *vcpu, 6407 struct kvm_nested_state __user *user_kvm_nested_state, 6408 u32 user_data_size) 6409 { 6410 struct vcpu_vmx *vmx; 6411 struct vmcs12 *vmcs12; 6412 struct kvm_nested_state kvm_state = { 6413 .flags = 0, 6414 .format = KVM_STATE_NESTED_FORMAT_VMX, 6415 .size = sizeof(kvm_state), 6416 .hdr.vmx.flags = 0, 6417 .hdr.vmx.vmxon_pa = INVALID_GPA, 6418 .hdr.vmx.vmcs12_pa = INVALID_GPA, 6419 .hdr.vmx.preemption_timer_deadline = 0, 6420 }; 6421 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6422 &user_kvm_nested_state->data.vmx[0]; 6423 6424 if (!vcpu) 6425 return kvm_state.size + sizeof(*user_vmx_nested_state); 6426 6427 vmx = to_vmx(vcpu); 6428 vmcs12 = get_vmcs12(vcpu); 6429 6430 if (guest_can_use(vcpu, X86_FEATURE_VMX) && 6431 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { 6432 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; 6433 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; 6434 6435 if (vmx_has_valid_vmcs12(vcpu)) { 6436 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); 6437 6438 /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */ 6439 if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID) 6440 kvm_state.flags |= KVM_STATE_NESTED_EVMCS; 6441 6442 if (is_guest_mode(vcpu) && 6443 nested_cpu_has_shadow_vmcs(vmcs12) && 6444 vmcs12->vmcs_link_pointer != INVALID_GPA) 6445 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); 6446 } 6447 6448 if (vmx->nested.smm.vmxon) 6449 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; 6450 6451 if (vmx->nested.smm.guest_mode) 6452 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; 6453 6454 if (is_guest_mode(vcpu)) { 6455 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; 6456 6457 if (vmx->nested.nested_run_pending) 6458 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; 6459 6460 if (vmx->nested.mtf_pending) 6461 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING; 6462 6463 if (nested_cpu_has_preemption_timer(vmcs12) && 6464 vmx->nested.has_preemption_timer_deadline) { 6465 kvm_state.hdr.vmx.flags |= 6466 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE; 6467 kvm_state.hdr.vmx.preemption_timer_deadline = 6468 vmx->nested.preemption_timer_deadline; 6469 } 6470 } 6471 } 6472 6473 if (user_data_size < kvm_state.size) 6474 goto out; 6475 6476 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) 6477 return -EFAULT; 6478 6479 if (!vmx_has_valid_vmcs12(vcpu)) 6480 goto out; 6481 6482 /* 6483 * When running L2, the authoritative vmcs12 state is in the 6484 * vmcs02. When running L1, the authoritative vmcs12 state is 6485 * in the shadow or enlightened vmcs linked to vmcs01, unless 6486 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative 6487 * vmcs12 state is in the vmcs12 already. 6488 */ 6489 if (is_guest_mode(vcpu)) { 6490 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 6491 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 6492 } else { 6493 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 6494 if (!vmx->nested.need_vmcs12_to_shadow_sync) { 6495 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 6496 /* 6497 * L1 hypervisor is not obliged to keep eVMCS 6498 * clean fields data always up-to-date while 6499 * not in guest mode, 'hv_clean_fields' is only 6500 * supposed to be actual upon vmentry so we need 6501 * to ignore it here and do full copy. 6502 */ 6503 copy_enlightened_to_vmcs12(vmx, 0); 6504 else if (enable_shadow_vmcs) 6505 copy_shadow_to_vmcs12(vmx); 6506 } 6507 } 6508 6509 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); 6510 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE); 6511 6512 /* 6513 * Copy over the full allocated size of vmcs12 rather than just the size 6514 * of the struct. 6515 */ 6516 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE)) 6517 return -EFAULT; 6518 6519 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6520 vmcs12->vmcs_link_pointer != INVALID_GPA) { 6521 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12, 6522 get_shadow_vmcs12(vcpu), VMCS12_SIZE)) 6523 return -EFAULT; 6524 } 6525 out: 6526 return kvm_state.size; 6527 } 6528 6529 void vmx_leave_nested(struct kvm_vcpu *vcpu) 6530 { 6531 if (is_guest_mode(vcpu)) { 6532 to_vmx(vcpu)->nested.nested_run_pending = 0; 6533 nested_vmx_vmexit(vcpu, -1, 0, 0); 6534 } 6535 free_nested(vcpu); 6536 } 6537 6538 static int vmx_set_nested_state(struct kvm_vcpu *vcpu, 6539 struct kvm_nested_state __user *user_kvm_nested_state, 6540 struct kvm_nested_state *kvm_state) 6541 { 6542 struct vcpu_vmx *vmx = to_vmx(vcpu); 6543 struct vmcs12 *vmcs12; 6544 enum vm_entry_failure_code ignored; 6545 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6546 &user_kvm_nested_state->data.vmx[0]; 6547 int ret; 6548 6549 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX) 6550 return -EINVAL; 6551 6552 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) { 6553 if (kvm_state->hdr.vmx.smm.flags) 6554 return -EINVAL; 6555 6556 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) 6557 return -EINVAL; 6558 6559 /* 6560 * KVM_STATE_NESTED_EVMCS used to signal that KVM should 6561 * enable eVMCS capability on vCPU. However, since then 6562 * code was changed such that flag signals vmcs12 should 6563 * be copied into eVMCS in guest memory. 6564 * 6565 * To preserve backwards compatability, allow user 6566 * to set this flag even when there is no VMXON region. 6567 */ 6568 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) 6569 return -EINVAL; 6570 } else { 6571 if (!guest_can_use(vcpu, X86_FEATURE_VMX)) 6572 return -EINVAL; 6573 6574 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) 6575 return -EINVAL; 6576 } 6577 6578 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6579 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6580 return -EINVAL; 6581 6582 if (kvm_state->hdr.vmx.smm.flags & 6583 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) 6584 return -EINVAL; 6585 6586 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) 6587 return -EINVAL; 6588 6589 /* 6590 * SMM temporarily disables VMX, so we cannot be in guest mode, 6591 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags 6592 * must be zero. 6593 */ 6594 if (is_smm(vcpu) ? 6595 (kvm_state->flags & 6596 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING)) 6597 : kvm_state->hdr.vmx.smm.flags) 6598 return -EINVAL; 6599 6600 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6601 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) 6602 return -EINVAL; 6603 6604 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && 6605 (!guest_can_use(vcpu, X86_FEATURE_VMX) || 6606 !vmx->nested.enlightened_vmcs_enabled)) 6607 return -EINVAL; 6608 6609 vmx_leave_nested(vcpu); 6610 6611 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) 6612 return 0; 6613 6614 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; 6615 ret = enter_vmx_operation(vcpu); 6616 if (ret) 6617 return ret; 6618 6619 /* Empty 'VMXON' state is permitted if no VMCS loaded */ 6620 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) { 6621 /* See vmx_has_valid_vmcs12. */ 6622 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || 6623 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || 6624 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)) 6625 return -EINVAL; 6626 else 6627 return 0; 6628 } 6629 6630 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) { 6631 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || 6632 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) 6633 return -EINVAL; 6634 6635 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); 6636 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { 6637 /* 6638 * nested_vmx_handle_enlightened_vmptrld() cannot be called 6639 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be 6640 * restored yet. EVMCS will be mapped from 6641 * nested_get_vmcs12_pages(). 6642 */ 6643 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING; 6644 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 6645 } else { 6646 return -EINVAL; 6647 } 6648 6649 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { 6650 vmx->nested.smm.vmxon = true; 6651 vmx->nested.vmxon = false; 6652 6653 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) 6654 vmx->nested.smm.guest_mode = true; 6655 } 6656 6657 vmcs12 = get_vmcs12(vcpu); 6658 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12))) 6659 return -EFAULT; 6660 6661 if (vmcs12->hdr.revision_id != VMCS12_REVISION) 6662 return -EINVAL; 6663 6664 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6665 return 0; 6666 6667 vmx->nested.nested_run_pending = 6668 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); 6669 6670 vmx->nested.mtf_pending = 6671 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); 6672 6673 ret = -EINVAL; 6674 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6675 vmcs12->vmcs_link_pointer != INVALID_GPA) { 6676 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 6677 6678 if (kvm_state->size < 6679 sizeof(*kvm_state) + 6680 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) 6681 goto error_guest_mode; 6682 6683 if (copy_from_user(shadow_vmcs12, 6684 user_vmx_nested_state->shadow_vmcs12, 6685 sizeof(*shadow_vmcs12))) { 6686 ret = -EFAULT; 6687 goto error_guest_mode; 6688 } 6689 6690 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 6691 !shadow_vmcs12->hdr.shadow_vmcs) 6692 goto error_guest_mode; 6693 } 6694 6695 vmx->nested.has_preemption_timer_deadline = false; 6696 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { 6697 vmx->nested.has_preemption_timer_deadline = true; 6698 vmx->nested.preemption_timer_deadline = 6699 kvm_state->hdr.vmx.preemption_timer_deadline; 6700 } 6701 6702 if (nested_vmx_check_controls(vcpu, vmcs12) || 6703 nested_vmx_check_host_state(vcpu, vmcs12) || 6704 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored)) 6705 goto error_guest_mode; 6706 6707 vmx->nested.dirty_vmcs12 = true; 6708 vmx->nested.force_msr_bitmap_recalc = true; 6709 ret = nested_vmx_enter_non_root_mode(vcpu, false); 6710 if (ret) 6711 goto error_guest_mode; 6712 6713 if (vmx->nested.mtf_pending) 6714 kvm_make_request(KVM_REQ_EVENT, vcpu); 6715 6716 return 0; 6717 6718 error_guest_mode: 6719 vmx->nested.nested_run_pending = 0; 6720 return ret; 6721 } 6722 6723 void nested_vmx_set_vmcs_shadowing_bitmap(void) 6724 { 6725 if (enable_shadow_vmcs) { 6726 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); 6727 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); 6728 } 6729 } 6730 6731 /* 6732 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo 6733 * that madness to get the encoding for comparison. 6734 */ 6735 #define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10))) 6736 6737 static u64 nested_vmx_calc_vmcs_enum_msr(void) 6738 { 6739 /* 6740 * Note these are the so called "index" of the VMCS field encoding, not 6741 * the index into vmcs12. 6742 */ 6743 unsigned int max_idx, idx; 6744 int i; 6745 6746 /* 6747 * For better or worse, KVM allows VMREAD/VMWRITE to all fields in 6748 * vmcs12, regardless of whether or not the associated feature is 6749 * exposed to L1. Simply find the field with the highest index. 6750 */ 6751 max_idx = 0; 6752 for (i = 0; i < nr_vmcs12_fields; i++) { 6753 /* The vmcs12 table is very, very sparsely populated. */ 6754 if (!vmcs12_field_offsets[i]) 6755 continue; 6756 6757 idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i)); 6758 if (idx > max_idx) 6759 max_idx = idx; 6760 } 6761 6762 return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT; 6763 } 6764 6765 static void nested_vmx_setup_pinbased_ctls(struct vmcs_config *vmcs_conf, 6766 struct nested_vmx_msrs *msrs) 6767 { 6768 msrs->pinbased_ctls_low = 6769 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6770 6771 msrs->pinbased_ctls_high = vmcs_conf->pin_based_exec_ctrl; 6772 msrs->pinbased_ctls_high &= 6773 PIN_BASED_EXT_INTR_MASK | 6774 PIN_BASED_NMI_EXITING | 6775 PIN_BASED_VIRTUAL_NMIS | 6776 (enable_apicv ? PIN_BASED_POSTED_INTR : 0); 6777 msrs->pinbased_ctls_high |= 6778 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6779 PIN_BASED_VMX_PREEMPTION_TIMER; 6780 } 6781 6782 static void nested_vmx_setup_exit_ctls(struct vmcs_config *vmcs_conf, 6783 struct nested_vmx_msrs *msrs) 6784 { 6785 msrs->exit_ctls_low = 6786 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 6787 6788 msrs->exit_ctls_high = vmcs_conf->vmexit_ctrl; 6789 msrs->exit_ctls_high &= 6790 #ifdef CONFIG_X86_64 6791 VM_EXIT_HOST_ADDR_SPACE_SIZE | 6792 #endif 6793 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT | 6794 VM_EXIT_CLEAR_BNDCFGS; 6795 msrs->exit_ctls_high |= 6796 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | 6797 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 6798 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT | 6799 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 6800 6801 /* We support free control of debug control saving. */ 6802 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; 6803 } 6804 6805 static void nested_vmx_setup_entry_ctls(struct vmcs_config *vmcs_conf, 6806 struct nested_vmx_msrs *msrs) 6807 { 6808 msrs->entry_ctls_low = 6809 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 6810 6811 msrs->entry_ctls_high = vmcs_conf->vmentry_ctrl; 6812 msrs->entry_ctls_high &= 6813 #ifdef CONFIG_X86_64 6814 VM_ENTRY_IA32E_MODE | 6815 #endif 6816 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS; 6817 msrs->entry_ctls_high |= 6818 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER | 6819 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL); 6820 6821 /* We support free control of debug control loading. */ 6822 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; 6823 } 6824 6825 static void nested_vmx_setup_cpubased_ctls(struct vmcs_config *vmcs_conf, 6826 struct nested_vmx_msrs *msrs) 6827 { 6828 msrs->procbased_ctls_low = 6829 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6830 6831 msrs->procbased_ctls_high = vmcs_conf->cpu_based_exec_ctrl; 6832 msrs->procbased_ctls_high &= 6833 CPU_BASED_INTR_WINDOW_EXITING | 6834 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING | 6835 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | 6836 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | 6837 CPU_BASED_CR3_STORE_EXITING | 6838 #ifdef CONFIG_X86_64 6839 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | 6840 #endif 6841 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | 6842 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | 6843 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | 6844 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | 6845 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 6846 /* 6847 * We can allow some features even when not supported by the 6848 * hardware. For example, L1 can specify an MSR bitmap - and we 6849 * can use it to avoid exits to L1 - even when L0 runs L2 6850 * without MSR bitmaps. 6851 */ 6852 msrs->procbased_ctls_high |= 6853 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6854 CPU_BASED_USE_MSR_BITMAPS; 6855 6856 /* We support free control of CR3 access interception. */ 6857 msrs->procbased_ctls_low &= 6858 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); 6859 } 6860 6861 static void nested_vmx_setup_secondary_ctls(u32 ept_caps, 6862 struct vmcs_config *vmcs_conf, 6863 struct nested_vmx_msrs *msrs) 6864 { 6865 msrs->secondary_ctls_low = 0; 6866 6867 msrs->secondary_ctls_high = vmcs_conf->cpu_based_2nd_exec_ctrl; 6868 msrs->secondary_ctls_high &= 6869 SECONDARY_EXEC_DESC | 6870 SECONDARY_EXEC_ENABLE_RDTSCP | 6871 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 6872 SECONDARY_EXEC_WBINVD_EXITING | 6873 SECONDARY_EXEC_APIC_REGISTER_VIRT | 6874 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 6875 SECONDARY_EXEC_RDRAND_EXITING | 6876 SECONDARY_EXEC_ENABLE_INVPCID | 6877 SECONDARY_EXEC_ENABLE_VMFUNC | 6878 SECONDARY_EXEC_RDSEED_EXITING | 6879 SECONDARY_EXEC_ENABLE_XSAVES | 6880 SECONDARY_EXEC_TSC_SCALING | 6881 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; 6882 6883 /* 6884 * We can emulate "VMCS shadowing," even if the hardware 6885 * doesn't support it. 6886 */ 6887 msrs->secondary_ctls_high |= 6888 SECONDARY_EXEC_SHADOW_VMCS; 6889 6890 if (enable_ept) { 6891 /* nested EPT: emulate EPT also to L1 */ 6892 msrs->secondary_ctls_high |= 6893 SECONDARY_EXEC_ENABLE_EPT; 6894 msrs->ept_caps = 6895 VMX_EPT_PAGE_WALK_4_BIT | 6896 VMX_EPT_PAGE_WALK_5_BIT | 6897 VMX_EPTP_WB_BIT | 6898 VMX_EPT_INVEPT_BIT | 6899 VMX_EPT_EXECUTE_ONLY_BIT; 6900 6901 msrs->ept_caps &= ept_caps; 6902 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | 6903 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | 6904 VMX_EPT_1GB_PAGE_BIT; 6905 if (enable_ept_ad_bits) { 6906 msrs->secondary_ctls_high |= 6907 SECONDARY_EXEC_ENABLE_PML; 6908 msrs->ept_caps |= VMX_EPT_AD_BIT; 6909 } 6910 6911 /* 6912 * Advertise EPTP switching irrespective of hardware support, 6913 * KVM emulates it in software so long as VMFUNC is supported. 6914 */ 6915 if (cpu_has_vmx_vmfunc()) 6916 msrs->vmfunc_controls = VMX_VMFUNC_EPTP_SWITCHING; 6917 } 6918 6919 /* 6920 * Old versions of KVM use the single-context version without 6921 * checking for support, so declare that it is supported even 6922 * though it is treated as global context. The alternative is 6923 * not failing the single-context invvpid, and it is worse. 6924 */ 6925 if (enable_vpid) { 6926 msrs->secondary_ctls_high |= 6927 SECONDARY_EXEC_ENABLE_VPID; 6928 msrs->vpid_caps = VMX_VPID_INVVPID_BIT | 6929 VMX_VPID_EXTENT_SUPPORTED_MASK; 6930 } 6931 6932 if (enable_unrestricted_guest) 6933 msrs->secondary_ctls_high |= 6934 SECONDARY_EXEC_UNRESTRICTED_GUEST; 6935 6936 if (flexpriority_enabled) 6937 msrs->secondary_ctls_high |= 6938 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 6939 6940 if (enable_sgx) 6941 msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING; 6942 } 6943 6944 static void nested_vmx_setup_misc_data(struct vmcs_config *vmcs_conf, 6945 struct nested_vmx_msrs *msrs) 6946 { 6947 msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA; 6948 msrs->misc_low |= 6949 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | 6950 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | 6951 VMX_MISC_ACTIVITY_HLT | 6952 VMX_MISC_ACTIVITY_WAIT_SIPI; 6953 msrs->misc_high = 0; 6954 } 6955 6956 static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs) 6957 { 6958 /* 6959 * This MSR reports some information about VMX support. We 6960 * should return information about the VMX we emulate for the 6961 * guest, and the VMCS structure we give it - not about the 6962 * VMX support of the underlying hardware. 6963 */ 6964 msrs->basic = 6965 VMCS12_REVISION | 6966 VMX_BASIC_TRUE_CTLS | 6967 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | 6968 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); 6969 6970 if (cpu_has_vmx_basic_inout()) 6971 msrs->basic |= VMX_BASIC_INOUT; 6972 } 6973 6974 static void nested_vmx_setup_cr_fixed(struct nested_vmx_msrs *msrs) 6975 { 6976 /* 6977 * These MSRs specify bits which the guest must keep fixed on 6978 * while L1 is in VMXON mode (in L1's root mode, or running an L2). 6979 * We picked the standard core2 setting. 6980 */ 6981 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) 6982 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE 6983 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; 6984 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; 6985 6986 /* These MSRs specify bits which the guest must keep fixed off. */ 6987 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); 6988 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); 6989 6990 if (vmx_umip_emulated()) 6991 msrs->cr4_fixed1 |= X86_CR4_UMIP; 6992 } 6993 6994 /* 6995 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be 6996 * returned for the various VMX controls MSRs when nested VMX is enabled. 6997 * The same values should also be used to verify that vmcs12 control fields are 6998 * valid during nested entry from L1 to L2. 6999 * Each of these control msrs has a low and high 32-bit half: A low bit is on 7000 * if the corresponding bit in the (32-bit) control field *must* be on, and a 7001 * bit in the high half is on if the corresponding bit in the control field 7002 * may be on. See also vmx_control_verify(). 7003 */ 7004 void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) 7005 { 7006 struct nested_vmx_msrs *msrs = &vmcs_conf->nested; 7007 7008 /* 7009 * Note that as a general rule, the high half of the MSRs (bits in 7010 * the control fields which may be 1) should be initialized by the 7011 * intersection of the underlying hardware's MSR (i.e., features which 7012 * can be supported) and the list of features we want to expose - 7013 * because they are known to be properly supported in our code. 7014 * Also, usually, the low half of the MSRs (bits which must be 1) can 7015 * be set to 0, meaning that L1 may turn off any of these bits. The 7016 * reason is that if one of these bits is necessary, it will appear 7017 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control 7018 * fields of vmcs01 and vmcs02, will turn these bits off - and 7019 * nested_vmx_l1_wants_exit() will not pass related exits to L1. 7020 * These rules have exceptions below. 7021 */ 7022 nested_vmx_setup_pinbased_ctls(vmcs_conf, msrs); 7023 7024 nested_vmx_setup_exit_ctls(vmcs_conf, msrs); 7025 7026 nested_vmx_setup_entry_ctls(vmcs_conf, msrs); 7027 7028 nested_vmx_setup_cpubased_ctls(vmcs_conf, msrs); 7029 7030 nested_vmx_setup_secondary_ctls(ept_caps, vmcs_conf, msrs); 7031 7032 nested_vmx_setup_misc_data(vmcs_conf, msrs); 7033 7034 nested_vmx_setup_basic(msrs); 7035 7036 nested_vmx_setup_cr_fixed(msrs); 7037 7038 msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr(); 7039 } 7040 7041 void nested_vmx_hardware_unsetup(void) 7042 { 7043 int i; 7044 7045 if (enable_shadow_vmcs) { 7046 for (i = 0; i < VMX_BITMAP_NR; i++) 7047 free_page((unsigned long)vmx_bitmap[i]); 7048 } 7049 } 7050 7051 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) 7052 { 7053 int i; 7054 7055 if (!cpu_has_vmx_shadow_vmcs()) 7056 enable_shadow_vmcs = 0; 7057 if (enable_shadow_vmcs) { 7058 for (i = 0; i < VMX_BITMAP_NR; i++) { 7059 /* 7060 * The vmx_bitmap is not tied to a VM and so should 7061 * not be charged to a memcg. 7062 */ 7063 vmx_bitmap[i] = (unsigned long *) 7064 __get_free_page(GFP_KERNEL); 7065 if (!vmx_bitmap[i]) { 7066 nested_vmx_hardware_unsetup(); 7067 return -ENOMEM; 7068 } 7069 } 7070 7071 init_vmcs_shadow_fields(); 7072 } 7073 7074 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear; 7075 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch; 7076 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld; 7077 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst; 7078 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread; 7079 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume; 7080 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite; 7081 exit_handlers[EXIT_REASON_VMOFF] = handle_vmxoff; 7082 exit_handlers[EXIT_REASON_VMON] = handle_vmxon; 7083 exit_handlers[EXIT_REASON_INVEPT] = handle_invept; 7084 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid; 7085 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc; 7086 7087 return 0; 7088 } 7089 7090 struct kvm_x86_nested_ops vmx_nested_ops = { 7091 .leave_nested = vmx_leave_nested, 7092 .is_exception_vmexit = nested_vmx_is_exception_vmexit, 7093 .check_events = vmx_check_nested_events, 7094 .has_events = vmx_has_nested_events, 7095 .triple_fault = nested_vmx_triple_fault, 7096 .get_state = vmx_get_nested_state, 7097 .set_state = vmx_set_nested_state, 7098 .get_nested_state_pages = vmx_get_nested_state_pages, 7099 .write_log_dirty = nested_vmx_write_pml_buffer, 7100 .enable_evmcs = nested_enable_evmcs, 7101 .get_evmcs_version = nested_get_evmcs_version, 7102 .hv_inject_synthetic_vmexit_post_tlb_flush = vmx_hv_inject_synthetic_vmexit_post_tlb_flush, 7103 }; 7104