// SPDX-License-Identifier: GPL-2.0

#include <linux/objtool.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "evmcs.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "sgx.h"
#include "trace.h"
#include "vmx.h"
#include "x86.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap	(vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap	(vmx_bitmap[VMX_VMWRITE_BITMAP])

struct shadow_vmcs_field {
	u16	encoding;
	u16	offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_only_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_only_fields[j++] = entry;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_write_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
			  field <= GUEST_TR_AR_BYTES,
			  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
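		 * Fields skipped below are not added to the shadow list and
		 * their bits stay set in the VMREAD/VMWRITE bitmaps, so L1's
		 * VMREAD/VMWRITE of them still VM-exit and are emulated.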
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
	}
	max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force sync to shadow VMCS because
	 * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
	 * fields and thus must be synced.
	 */
	if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
		to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;

	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == INVALID_GPA &&
	    !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
		return nested_vmx_failInvalid(vcpu);

	return nested_vmx_failValid(vcpu, vm_instruction_error);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: don't simply reset the guest here.
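	 *
	 * For now this requests a triple fault, which shuts the guest down
	 * rather than delivering an architectural VMX abort with the given
	 * indicator.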
	 */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
	vmx->nested.need_vmcs12_to_shadow_sync = false;
}

static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
		kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
		vmx->nested.hv_evmcs = NULL;
	}

	vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
}

static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
				     struct loaded_vmcs *prev)
{
	struct vmcs_host_state *dest, *src;

	if (unlikely(!vmx->guest_state_loaded))
		return;

	src = &prev->host_state;
	dest = &vmx->loaded_vmcs->host_state;

	vmx_set_vmcs_host_state(dest, src->cr3, src->fs_sel, src->gs_sel,
				src->fs_base, src->gs_base);
	dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
	dest->ds_sel = src->ds_sel;
	dest->es_sel = src->es_sel;
#endif
}

static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *prev;
	int cpu;

	if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
		return;

	cpu = get_cpu();
	prev = vmx->loaded_vmcs;
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load_vmcs(vcpu, cpu, prev);
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

	vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;

	/*
	 * All lazily updated registers will be reloaded from VMCS12 on both
	 * vmentry and vmexit.
	 */
	vcpu->arch.regs_dirty = 0;
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
		vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	vmx->nested.vmxon_ptr = INVALID_GPA;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = INVALID_GPA;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	vmx->nested.cached_vmcs12 = NULL;
	kfree(vmx->nested.cached_shadow_vmcs12);
	vmx->nested.cached_shadow_vmcs12 = NULL;
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_clean(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vcpu_put(vcpu);
}

#define EPTP_PA_MASK	GENMASK_ULL(51, 12)

static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
{
	return VALID_PAGE(root_hpa) &&
	       ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
}

static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
				       gpa_t addr)
{
	uint i;
	struct kvm_mmu_root_info *cached_root;

	WARN_ON_ONCE(!mmu_is_nested(vcpu));

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		cached_root = &vcpu->arch.mmu->prev_roots[i];

		if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
					    eptp))
			vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
	}
}

static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vm_exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		vm_exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else {
		if (fault->error_code & PFERR_RSVD_MASK)
			vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
		else
			vm_exit_reason = EXIT_REASON_EPT_VIOLATION;

		/*
		 * Although the caller (kvm_inject_emulated_page_fault) would
		 * have already synced the faulting address in the shadow EPT
		 * tables for the current EPTP12, we also need to sync it for
		 * any other cached EPTP02s based on the same EP4TA, since the
		 * TLB associates mappings to the EP4TA rather than the full EPTP.
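		 * (The EP4TA is the EPT PML4 table address, i.e. EPTP bits
		 * 51:12; see EPTP_PA_MASK and nested_ept_root_matches() above.)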
		 */
		nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
					   fault->address);
	}

	nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
	int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);

	kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level,
				nested_ept_ad_enabled(vcpu),
				nested_ept_get_eptp(vcpu));
}

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	nested_ept_new_eptp(vcpu);
	vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}


/*
 * KVM wants to inject the page faults it receives into the guest. In a
 * nested guest, this function checks whether they need to be injected into
 * L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ?
				     payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~DR6_BT;
				payload ^= DR6_ACTIVE_LOW;
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}


static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
	    !to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
	    CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
		return -EINVAL;

	return 0;
}

/*
 * For x2APIC MSRs, ignore the vmcs01 bitmap.  L1 can enable x2APIC without L1
 * itself utilizing x2APIC.  All MSRs were previously set to be intercepted,
 * only the "disable intercept" case needs to be handled.
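 * I.e. an intercept is dropped only when L1 does not intercept the access
 * either; otherwise the bit is simply left set.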
 */
static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_l0,
						 u32 msr, int type)
{
	if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr))
		vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr);

	if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr))
		vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr);
}

static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}

#define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw)					\
static inline									\
void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx,			\
					 unsigned long *msr_bitmap_l1,		\
					 unsigned long *msr_bitmap_l0, u32 msr)	\
{										\
	if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) ||		\
	    vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr))			\
		vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr);			\
	else									\
		vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr);			\
}
BUILD_NVMX_MSR_INTERCEPT_HELPER(read)
BUILD_NVMX_MSR_INTERCEPT_HELPER(write)

static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
						    unsigned long *msr_bitmap_l1,
						    unsigned long *msr_bitmap_l0,
						    u32 msr, int types)
{
	if (types & MSR_TYPE_R)
		nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
						  msr_bitmap_l0, msr);
	if (types & MSR_TYPE_W)
		nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
						   msr_bitmap_l0, msr);
}

/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
	struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	/*
	 * MSR bitmap update can be skipped when:
	 * - MSR bitmap for L1 hasn't changed.
	 * - Nested hypervisor (L1) is attempting to launch the same L2 as
	 *   before.
	 * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature
	 *   and tells KVM (L0) there were no changes in MSR bitmap for L2.
	 */
	if (!vmx->nested.force_msr_bitmap_recalc && evmcs &&
	    evmcs->hv_enlightenments_control.msr_bitmap &&
	    evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
		return true;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map->hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively toggle those relevant to L2.
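	 * (MSRs 0x800 - 0x8ff: 256 read-intercept bits plus 256
	 * write-intercept bits, i.e. 64 bytes in total.)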
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff; it just lets the processor take the value
			 * from the virtual-APIC page.  Take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_x2apic_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_x2apic_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_x2apic_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/*
	 * Always check vmcs01's bitmap to honor userspace MSR filters and any
	 * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
	 */
#ifdef CONFIG_X86_64
	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
#endif
	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);

	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_IA32_PRED_CMD, MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);

	vmx->nested.force_msr_bitmap_recalc = false;

	return true;
}

static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == INVALID_GPA)
		return;

	if (ghc->gpa != vmcs12->vmcs_link_pointer &&
	    kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
				      vmcs12->vmcs_link_pointer, VMCS12_SIZE))
		return;

	kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
			      VMCS12_SIZE);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == INVALID_GPA)
		return;

	if (ghc->gpa != vmcs12->vmcs_link_pointer &&
	    kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
				      vmcs12->vmcs_link_pointer, VMCS12_SIZE))
		return;

	kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
			       VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    CC(!page_address_valid(vcpu,
				   vmcs12->apic_access_addr)))
		return -EINVAL;
	else
		return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	       nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has already been checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	   (CC(!nested_cpu_has_vid(vmcs12)) ||
	    CC(!nested_exit_intr_ack_set(vcpu)) ||
	    CC((vmcs12->posted_intr_nv & 0xff00)) ||
	    CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	if (count == 0)
		return 0;

	if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
	    !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_load_count,
					   vmcs12->vm_exit_msr_load_addr)) ||
	    CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_store_count,
					   vmcs12->vm_exit_msr_store_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						      struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_entry_msr_load_count,
					   vmcs12->vm_entry_msr_load_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (CC(!nested_cpu_has_ept(vmcs12)) ||
	    CC(!page_address_valid(vcpu, vmcs12->pml_address)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							  struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (CC(!page_address_valid(vcpu,
				   vmcs12->vmread_bitmap)) ||
	    CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
		return -EINVAL;
	if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
	    CC(e->index == MSR_IA32_UCODE_REV))
		return -EINVAL;
	if (CC(e->reserved != 0))
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_FS_BASE) ||
	    CC(e->index == MSR_GS_BASE) ||
	    CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				       vmx->nested.msrs.misc_high);

	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}

/*
 * Load guest's/host's msr at nested entry/exit.
 * Return 0 for success, entry index for failure.
 *
 * One of the failure modes for MSR load/store is when a list exceeds the
 * virtual hardware's capacity. To maintain compatibility with hardware as
 * much as possible, process all valid entries before failing rather than
 * precheck for a capacity violation.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			goto fail;

		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		if (kvm_set_msr(vcpu, e.index, e.value)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	/* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
	return i + 1;
}

static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
					    u32 msr_index,
					    u64 *data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If the L0 hypervisor stored a more accurate value for the TSC that
	 * does not include the time taken for emulation of the L2->L1
	 * VM-exit in L0, use the more accurate value.
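	 * Such a value is captured in the vmcs02 VM-exit MSR-store area
	 * (vmx->msr_autostore.guest) and is converted to an L1 TSC value
	 * via kvm_read_l1_tsc() below.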
	 */
	if (msr_index == MSR_IA32_TSC) {
		int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
						    MSR_IA32_TSC);

		if (i >= 0) {
			u64 val = vmx->msr_autostore.guest.val[i].value;

			*data = kvm_read_l1_tsc(vcpu, val);
			return true;
		}
	}

	if (kvm_get_msr(vcpu, msr_index, data)) {
		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
				     msr_index);
		return false;
	}
	return true;
}

static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
				     struct vmx_msr_entry *e)
{
	if (kvm_vcpu_read_guest(vcpu,
				gpa + i * sizeof(*e),
				e, 2 * sizeof(u32))) {
		pr_debug_ratelimited(
			"%s cannot read MSR entry (%u, 0x%08llx)\n",
			__func__, i, gpa + i * sizeof(*e));
		return false;
	}
	if (nested_vmx_store_msr_check(vcpu, e)) {
		pr_debug_ratelimited(
			"%s check failed (%u, 0x%x, 0x%x)\n",
			__func__, i, e->index, e->reserved);
		return false;
	}
	return true;
}

static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u64 data;
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			return -EINVAL;

		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return -EINVAL;

		if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
			return -EINVAL;

		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
						offsetof(struct vmx_msr_entry, value),
					 &data, sizeof(data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, data);
			return -EINVAL;
		}
	}
	return 0;
}

static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 count = vmcs12->vm_exit_msr_store_count;
	u64 gpa = vmcs12->vm_exit_msr_store_addr;
	struct vmx_msr_entry e;
	u32 i;

	for (i = 0; i < count; i++) {
		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return false;

		if (e.index == msr_index)
			return true;
	}
	return false;
}

static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
					   u32 msr_index)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
	bool in_vmcs12_store_list;
	int msr_autostore_slot;
	bool in_autostore_list;
	int last;

	msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
	in_autostore_list = msr_autostore_slot >= 0;
	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);

	if (in_vmcs12_store_list && !in_autostore_list) {
		if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
			/*
			 * Emulated VMEntry does not fail here.  Instead a less
			 * accurate value will be returned by
			 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
			 * instead of reading the value from the vmcs02 VMExit
			 * MSR-store area.
			 */
			pr_warn_ratelimited(
				"Not enough msr entries in msr_autostore. "
				"Can't add msr %x\n",
				msr_index);
			return;
		}
		last = autostore->nr++;
		autostore->val[last].index = msr_index;
	} else if (!in_vmcs12_store_list && in_autostore_list) {
		last = --autostore->nr;
		autostore->val[msr_autostore_slot] = autostore->val[last];
	}
}

/*
 * Load guest's/host's cr3 at nested entry/exit.  @nested_ept is true if we are
 * emulating VM-Entry into a guest with EPT enabled.  On failure, the expected
 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
 * @entry_failure_code.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_ept, bool reload_pdptrs,
			       enum vm_entry_failure_code *entry_failure_code)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
	}

	/*
	 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
	 * must not be dereferenced.
	 */
	if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, cr3))) {
		*entry_failure_code = ENTRY_FAIL_PDPTE;
		return -EINVAL;
	}

	if (!nested_ept)
		kvm_mmu_new_pgd(vcpu, cr3);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	return 0;
}

/*
 * Returns whether KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently from TLB entries populated by L1.
 *
 * If L0 uses EPT, L1 and L2 run with different EPTP because
 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
 * are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return enable_ept ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
					    struct vmcs12 *vmcs12,
					    bool is_vmenter)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
	 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
	 * full TLB flush from the guest's perspective.  This is required even
	 * if VPID is disabled in the host as KVM may need to synchronize the
	 * MMU in response to the guest TLB flush.
	 *
	 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
	 * EPT is a special snowflake, as guest-physical mappings aren't
	 * flushed on VPID invalidations, including VM-Enter or VM-Exit with
	 * VPID disabled.  As a result, KVM _never_ needs to sync nEPT
	 * entries on VM-Enter because L1 can't rely on VM-Enter to flush
	 * those mappings.
	 */
	if (!nested_cpu_has_vpid(vmcs12)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/* L2 should never have a VPID if VPID is disabled. */
	WARN_ON(!enable_vpid);

	/*
	 * VPID is enabled and in use by vmcs12.
	 * If vpid12 is changing, then emulate a guest TLB flush as KVM does
	 * not track vpid12 history nor is the VPID incorporated into the MMU
	 * context.  I.e. KVM must assume that the new vpid12 has never been
	 * used and thus represents a new guest ASID that cannot have entries
	 * in the TLB.
	 */
	if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
		vmx->nested.last_vpid = vmcs12->virtual_processor_id;
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/*
	 * If VPID is enabled, used by vmcs12, and vpid12 is not changing but
	 * does not have a unique TLB tag (ASID), i.e. EPT is disabled and
	 * KVM was unable to allocate a VPID for L2, flush the current context
	 * as the effective ASID is common to both L1 and L2.
	 */
	if (!nested_has_guest_tlb_tag(vcpu))
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}

static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}

static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0.
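	 * (The allowed-1 settings live in bits 63:32 of the control MSR.)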
	 */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicate bits that "must be 1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}

/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
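		 * (VMX_BASIC[55] is the bit that advertises support for the
		 * "true" capability MSRs.)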
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	case MSR_IA32_VMX_VMFUNC:
		if (data & ~vmx->nested.msrs.vmfunc_controls)
			return -EINVAL;
		vmx->nested.msrs.vmfunc_controls = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}

/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case they
 * have been modified by the L1 guest.  Note, "writable" in this context
 * means "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
 * VM-exit information fields (which are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS").
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i;

	if (WARN_ON(!shadow_vmcs))
		return;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (i = 0; i < max_shadow_read_write_fields; i++) {
		field = shadow_read_write_fields[i];
		val = __vmcs_readl(field.encoding);
		vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}

static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
	const struct shadow_vmcs_field *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i, q;

	if (WARN_ON(!shadow_vmcs))
		return;

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			val = vmcs12_read_any(vmcs12, field.encoding,
					      field.offset);
			__vmcs_writel(field.encoding, val);
		}
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);
}

static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
	vmcs12->tpr_threshold = evmcs->tpr_threshold;
	vmcs12->guest_rip = evmcs->guest_rip;

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
		vmcs12->guest_rsp = evmcs->guest_rsp;
		vmcs12->guest_rflags = evmcs->guest_rflags;
		vmcs12->guest_interruptibility_info =
			evmcs->guest_interruptibility_info;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->cpu_based_vm_exec_control =
			evmcs->cpu_based_vm_exec_control;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
		vmcs12->exception_bitmap = evmcs->exception_bitmap;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
		vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
		vmcs12->vm_entry_intr_info_field =
			evmcs->vm_entry_intr_info_field;
		vmcs12->vm_entry_exception_error_code =
			evmcs->vm_entry_exception_error_code;
		vmcs12->vm_entry_instruction_len =
			evmcs->vm_entry_instruction_len;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
		vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
		vmcs12->host_cr0 = evmcs->host_cr0;
		vmcs12->host_cr3 = evmcs->host_cr3;
		vmcs12->host_cr4 = evmcs->host_cr4;
		vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
		vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
		vmcs12->host_rip = evmcs->host_rip;
		vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
		vmcs12->host_es_selector = evmcs->host_es_selector;
		vmcs12->host_cs_selector = evmcs->host_cs_selector;
		vmcs12->host_ss_selector = evmcs->host_ss_selector;
		vmcs12->host_ds_selector = evmcs->host_ds_selector;
		vmcs12->host_fs_selector = evmcs->host_fs_selector;
		vmcs12->host_gs_selector = evmcs->host_gs_selector;
		vmcs12->host_tr_selector = evmcs->host_tr_selector;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
		vmcs12->secondary_vm_exec_control =
			evmcs->secondary_vm_exec_control;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
		vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
		vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
		vmcs12->msr_bitmap = evmcs->msr_bitmap;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
		vmcs12->guest_es_base = evmcs->guest_es_base;
		vmcs12->guest_cs_base = evmcs->guest_cs_base;
		vmcs12->guest_ss_base = evmcs->guest_ss_base;
		vmcs12->guest_ds_base = evmcs->guest_ds_base;
		vmcs12->guest_fs_base = evmcs->guest_fs_base;
		vmcs12->guest_gs_base = evmcs->guest_gs_base;
		vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
		vmcs12->guest_tr_base = evmcs->guest_tr_base;
		vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
		vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
		vmcs12->guest_es_limit = evmcs->guest_es_limit;
		vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
		vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
		vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
		vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
		vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
		vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
		vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
		vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
		vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
		vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
		vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
		vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
		vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
		vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
		vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
		vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
		vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
		vmcs12->guest_es_selector = evmcs->guest_es_selector;
		vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
		vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
		vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
		vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
		vmcs12->guest_gs_selector =
			evmcs->guest_gs_selector;
		vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
		vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
		vmcs12->tsc_offset = evmcs->tsc_offset;
		vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
		vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
		vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
		vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
		vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
		vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
		vmcs12->guest_cr0 = evmcs->guest_cr0;
		vmcs12->guest_cr3 = evmcs->guest_cr3;
		vmcs12->guest_cr4 = evmcs->guest_cr4;
		vmcs12->guest_dr7 = evmcs->guest_dr7;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
		vmcs12->host_fs_base = evmcs->host_fs_base;
		vmcs12->host_gs_base = evmcs->host_gs_base;
		vmcs12->host_tr_base = evmcs->host_tr_base;
		vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
		vmcs12->host_idtr_base = evmcs->host_idtr_base;
		vmcs12->host_rsp = evmcs->host_rsp;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
		vmcs12->ept_pointer = evmcs->ept_pointer;
		vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
	}

	if (unlikely(!(hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
		vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
		vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
		vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
		vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
		vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
		vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
		vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
		vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
		vmcs12->guest_pending_dbg_exceptions =
			evmcs->guest_pending_dbg_exceptions;
		vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
		vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
		vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
		vmcs12->guest_activity_state = evmcs->guest_activity_state;
		vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
	}

	/*
	 * Not used?
	 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
	 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
	 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
	 * vmcs12->page_fault_error_code_mask =
	 *		evmcs->page_fault_error_code_mask;
	 * vmcs12->page_fault_error_code_match =
	 *		evmcs->page_fault_error_code_match;
	 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
	 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
	 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
	 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
	 */

	/*
	 * Read only fields:
	 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
	 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
	 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
	 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
	 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
	 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
	 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
	 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
	 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
	 * vmcs12->exit_qualification = evmcs->exit_qualification;
	 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
	 *
	 * Not present in struct vmcs12:
	 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
	 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
	 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
	 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
	 */

	return;
}

static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/*
	 * Should not be changed by KVM:
	 *
	 * evmcs->host_es_selector = vmcs12->host_es_selector;
	 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
	 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
	 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
	 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
	 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
	 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
	 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
	 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
	 * evmcs->host_cr0 = vmcs12->host_cr0;
	 * evmcs->host_cr3 = vmcs12->host_cr3;
	 * evmcs->host_cr4 = vmcs12->host_cr4;
	 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
	 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
	 * evmcs->host_rip = vmcs12->host_rip;
	 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
	 * evmcs->host_fs_base = vmcs12->host_fs_base;
	 * evmcs->host_gs_base = vmcs12->host_gs_base;
	 * evmcs->host_tr_base = vmcs12->host_tr_base;
	 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
	 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
	 * evmcs->host_rsp = vmcs12->host_rsp;
	 * sync_vmcs02_to_vmcs12() doesn't read these:
	 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
	 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
	 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
	 * evmcs->ept_pointer = vmcs12->ept_pointer;
evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; 1844 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; 1845 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; 1846 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; 1847 * evmcs->tpr_threshold = vmcs12->tpr_threshold; 1848 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; 1849 * evmcs->exception_bitmap = vmcs12->exception_bitmap; 1850 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; 1851 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; 1852 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; 1853 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; 1854 * evmcs->page_fault_error_code_mask = 1855 * vmcs12->page_fault_error_code_mask; 1856 * evmcs->page_fault_error_code_match = 1857 * vmcs12->page_fault_error_code_match; 1858 * evmcs->cr3_target_count = vmcs12->cr3_target_count; 1859 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; 1860 * evmcs->tsc_offset = vmcs12->tsc_offset; 1861 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; 1862 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; 1863 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; 1864 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; 1865 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; 1866 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; 1867 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; 1868 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; 1869 * 1870 * Not present in struct vmcs12: 1871 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; 1872 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; 1873 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; 1874 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; 1875 */ 1876 1877 evmcs->guest_es_selector = vmcs12->guest_es_selector; 1878 evmcs->guest_cs_selector = vmcs12->guest_cs_selector; 1879 evmcs->guest_ss_selector = vmcs12->guest_ss_selector; 1880 evmcs->guest_ds_selector = vmcs12->guest_ds_selector; 1881 evmcs->guest_fs_selector = vmcs12->guest_fs_selector; 1882 evmcs->guest_gs_selector = vmcs12->guest_gs_selector; 1883 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; 1884 evmcs->guest_tr_selector = vmcs12->guest_tr_selector; 1885 1886 evmcs->guest_es_limit = vmcs12->guest_es_limit; 1887 evmcs->guest_cs_limit = vmcs12->guest_cs_limit; 1888 evmcs->guest_ss_limit = vmcs12->guest_ss_limit; 1889 evmcs->guest_ds_limit = vmcs12->guest_ds_limit; 1890 evmcs->guest_fs_limit = vmcs12->guest_fs_limit; 1891 evmcs->guest_gs_limit = vmcs12->guest_gs_limit; 1892 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; 1893 evmcs->guest_tr_limit = vmcs12->guest_tr_limit; 1894 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; 1895 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; 1896 1897 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; 1898 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; 1899 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; 1900 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; 1901 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; 1902 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; 1903 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; 1904 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; 1905 1906 evmcs->guest_es_base = vmcs12->guest_es_base; 1907 evmcs->guest_cs_base = vmcs12->guest_cs_base; 
1908 evmcs->guest_ss_base = vmcs12->guest_ss_base; 1909 evmcs->guest_ds_base = vmcs12->guest_ds_base; 1910 evmcs->guest_fs_base = vmcs12->guest_fs_base; 1911 evmcs->guest_gs_base = vmcs12->guest_gs_base; 1912 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; 1913 evmcs->guest_tr_base = vmcs12->guest_tr_base; 1914 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; 1915 evmcs->guest_idtr_base = vmcs12->guest_idtr_base; 1916 1917 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; 1918 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; 1919 1920 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; 1921 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; 1922 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; 1923 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; 1924 1925 evmcs->guest_pending_dbg_exceptions = 1926 vmcs12->guest_pending_dbg_exceptions; 1927 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; 1928 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; 1929 1930 evmcs->guest_activity_state = vmcs12->guest_activity_state; 1931 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; 1932 1933 evmcs->guest_cr0 = vmcs12->guest_cr0; 1934 evmcs->guest_cr3 = vmcs12->guest_cr3; 1935 evmcs->guest_cr4 = vmcs12->guest_cr4; 1936 evmcs->guest_dr7 = vmcs12->guest_dr7; 1937 1938 evmcs->guest_physical_address = vmcs12->guest_physical_address; 1939 1940 evmcs->vm_instruction_error = vmcs12->vm_instruction_error; 1941 evmcs->vm_exit_reason = vmcs12->vm_exit_reason; 1942 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; 1943 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; 1944 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; 1945 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; 1946 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; 1947 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; 1948 1949 evmcs->exit_qualification = vmcs12->exit_qualification; 1950 1951 evmcs->guest_linear_address = vmcs12->guest_linear_address; 1952 evmcs->guest_rsp = vmcs12->guest_rsp; 1953 evmcs->guest_rflags = vmcs12->guest_rflags; 1954 1955 evmcs->guest_interruptibility_info = 1956 vmcs12->guest_interruptibility_info; 1957 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; 1958 evmcs->vm_entry_controls = vmcs12->vm_entry_controls; 1959 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; 1960 evmcs->vm_entry_exception_error_code = 1961 vmcs12->vm_entry_exception_error_code; 1962 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; 1963 1964 evmcs->guest_rip = vmcs12->guest_rip; 1965 1966 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; 1967 1968 return; 1969 } 1970 1971 /* 1972 * This is an equivalent of the nested hypervisor executing the vmptrld 1973 * instruction. 
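 * Unlike a real VMPTRLD there is no memory operand: the eVMCS GPA is taken
 * from the Hyper-V VP assist page via nested_enlightened_vmentry().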
 */
static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
	struct kvm_vcpu *vcpu, bool from_launch)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool evmcs_gpa_changed = false;
	u64 evmcs_gpa;

	if (likely(!vmx->nested.enlightened_vmcs_enabled))
		return EVMPTRLD_DISABLED;

	if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
		nested_release_evmcs(vcpu);
		return EVMPTRLD_DISABLED;
	}

	if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
		vmx->nested.current_vmptr = INVALID_GPA;

		nested_release_evmcs(vcpu);

		if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
				 &vmx->nested.hv_evmcs_map))
			return EVMPTRLD_ERROR;

		vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;

		/*
		 * Currently, KVM only supports eVMCS version 1
		 * (== KVM_EVMCS_VERSION) and thus the guest is expected to set
		 * the first u32 field of the eVMCS, which specifies the eVMCS
		 * VersionNumber, to this value.
		 *
		 * The guest learns the eVMCS versions supported by the host by
		 * examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM
		 * is expected to set this CPUID leaf according to the value
		 * returned in vmcs_version from nested_enable_evmcs().
		 *
		 * However, it turns out that Microsoft Hyper-V fails to comply
		 * with its own invented interface: when Hyper-V uses eVMCS, it
		 * sets the first u32 field of the eVMCS to the revision_id
		 * specified in MSR_IA32_VMX_BASIC instead of an eVMCS version
		 * number, i.e. one of the supported versions specified in
		 * CPUID.0x4000000A.EAX[0:15].
		 *
		 * To work around this Hyper-V bug, accept either a supported
		 * eVMCS version or the VMCS12 revision_id as valid values for
		 * the first u32 field of the eVMCS.
		 */
		if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
		    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
			nested_release_evmcs(vcpu);
			return EVMPTRLD_VMFAIL;
		}

		vmx->nested.hv_evmcs_vmptr = evmcs_gpa;

		evmcs_gpa_changed = true;
		/*
		 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
		 * reloaded from guest memory (read-only fields, fields not
		 * present in struct hv_enlightened_vmcs, ...). Make sure there
		 * are no leftovers.
		 */
		if (from_launch) {
			struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

			memset(vmcs12, 0, sizeof(*vmcs12));
			vmcs12->hdr.revision_id = VMCS12_REVISION;
		}
	}

	/*
	 * Clean fields data can't be used on VMLAUNCH and when we switch
	 * between different L2 guests as KVM keeps a single VMCS12 per L1.
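	 * A clean bit only promises that a field is unchanged relative to the
	 * eVMCS contents KVM consumed on the previous run of this L2; after a
	 * VMLAUNCH or an eVMCS pointer change there is no such baseline, so
	 * every field has to be treated as dirty and reloaded.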
2049 */ 2050 if (from_launch || evmcs_gpa_changed) { 2051 vmx->nested.hv_evmcs->hv_clean_fields &= 2052 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2053 2054 vmx->nested.force_msr_bitmap_recalc = true; 2055 } 2056 2057 return EVMPTRLD_SUCCEEDED; 2058 } 2059 2060 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu) 2061 { 2062 struct vcpu_vmx *vmx = to_vmx(vcpu); 2063 2064 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 2065 copy_vmcs12_to_enlightened(vmx); 2066 else 2067 copy_vmcs12_to_shadow(vmx); 2068 2069 vmx->nested.need_vmcs12_to_shadow_sync = false; 2070 } 2071 2072 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) 2073 { 2074 struct vcpu_vmx *vmx = 2075 container_of(timer, struct vcpu_vmx, nested.preemption_timer); 2076 2077 vmx->nested.preemption_timer_expired = true; 2078 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); 2079 kvm_vcpu_kick(&vmx->vcpu); 2080 2081 return HRTIMER_NORESTART; 2082 } 2083 2084 static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu) 2085 { 2086 struct vcpu_vmx *vmx = to_vmx(vcpu); 2087 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2088 2089 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >> 2090 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2091 2092 if (!vmx->nested.has_preemption_timer_deadline) { 2093 vmx->nested.preemption_timer_deadline = 2094 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc; 2095 vmx->nested.has_preemption_timer_deadline = true; 2096 } 2097 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; 2098 } 2099 2100 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu, 2101 u64 preemption_timeout) 2102 { 2103 struct vcpu_vmx *vmx = to_vmx(vcpu); 2104 2105 /* 2106 * A timer value of zero is architecturally guaranteed to cause 2107 * a VMExit prior to executing any instructions in the guest. 2108 */ 2109 if (preemption_timeout == 0) { 2110 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); 2111 return; 2112 } 2113 2114 if (vcpu->arch.virtual_tsc_khz == 0) 2115 return; 2116 2117 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2118 preemption_timeout *= 1000000; 2119 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); 2120 hrtimer_start(&vmx->nested.preemption_timer, 2121 ktime_add_ns(ktime_get(), preemption_timeout), 2122 HRTIMER_MODE_ABS_PINNED); 2123 } 2124 2125 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2126 { 2127 if (vmx->nested.nested_run_pending && 2128 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) 2129 return vmcs12->guest_ia32_efer; 2130 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) 2131 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); 2132 else 2133 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); 2134 } 2135 2136 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) 2137 { 2138 /* 2139 * If vmcs02 hasn't been initialized, set the constant vmcs02 state 2140 * according to L0's settings (vmcs12 is irrelevant here). Host 2141 * fields that come from L0 and are not constant, e.g. HOST_CR3, 2142 * will be set as needed prior to VMLAUNCH/VMRESUME. 2143 */ 2144 if (vmx->nested.vmcs02_initialized) 2145 return; 2146 vmx->nested.vmcs02_initialized = true; 2147 2148 /* 2149 * We don't care what the EPTP value is we just need to guarantee 2150 * it's valid so we don't get a false positive when doing early 2151 * consistency checks. 
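	 * The dummy EPTP written below is never used for translation; the
	 * real EPTP for vmcs02 is installed later, when the L2 MMU root is
	 * loaded (see vmx_load_mmu_pgd()).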
2152 */ 2153 if (enable_ept && nested_early_check) 2154 vmcs_write64(EPT_POINTER, 2155 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); 2156 2157 /* All VMFUNCs are currently emulated through L0 vmexits. */ 2158 if (cpu_has_vmx_vmfunc()) 2159 vmcs_write64(VM_FUNCTION_CONTROL, 0); 2160 2161 if (cpu_has_vmx_posted_intr()) 2162 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); 2163 2164 if (cpu_has_vmx_msr_bitmap()) 2165 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); 2166 2167 /* 2168 * PML is emulated for L2, but never enabled in hardware as the MMU 2169 * handles A/D emulation. Disabling PML for L2 also avoids having to 2170 * deal with filtering out L2 GPAs from the buffer. 2171 */ 2172 if (enable_pml) { 2173 vmcs_write64(PML_ADDRESS, 0); 2174 vmcs_write16(GUEST_PML_INDEX, -1); 2175 } 2176 2177 if (cpu_has_vmx_encls_vmexit()) 2178 vmcs_write64(ENCLS_EXITING_BITMAP, INVALID_GPA); 2179 2180 /* 2181 * Set the MSR load/store lists to match L0's settings. Only the 2182 * addresses are constant (for vmcs02), the counts can change based 2183 * on L2's behavior, e.g. switching to/from long mode. 2184 */ 2185 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); 2186 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 2187 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 2188 2189 vmx_set_constant_host_state(vmx); 2190 } 2191 2192 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, 2193 struct vmcs12 *vmcs12) 2194 { 2195 prepare_vmcs02_constant_state(vmx); 2196 2197 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); 2198 2199 if (enable_vpid) { 2200 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) 2201 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); 2202 else 2203 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 2204 } 2205 } 2206 2207 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01, 2208 struct vmcs12 *vmcs12) 2209 { 2210 u32 exec_control; 2211 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); 2212 2213 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 2214 prepare_vmcs02_early_rare(vmx, vmcs12); 2215 2216 /* 2217 * PIN CONTROLS 2218 */ 2219 exec_control = __pin_controls_get(vmcs01); 2220 exec_control |= (vmcs12->pin_based_vm_exec_control & 2221 ~PIN_BASED_VMX_PREEMPTION_TIMER); 2222 2223 /* Posted interrupts setting is only taken from vmcs12. */ 2224 vmx->nested.pi_pending = false; 2225 if (nested_cpu_has_posted_intr(vmcs12)) 2226 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; 2227 else 2228 exec_control &= ~PIN_BASED_POSTED_INTR; 2229 pin_controls_set(vmx, exec_control); 2230 2231 /* 2232 * EXEC CONTROLS 2233 */ 2234 exec_control = __exec_controls_get(vmcs01); /* L0's desires */ 2235 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING; 2236 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING; 2237 exec_control &= ~CPU_BASED_TPR_SHADOW; 2238 exec_control |= vmcs12->cpu_based_vm_exec_control; 2239 2240 vmx->nested.l1_tpr_threshold = -1; 2241 if (exec_control & CPU_BASED_TPR_SHADOW) 2242 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 2243 #ifdef CONFIG_X86_64 2244 else 2245 exec_control |= CPU_BASED_CR8_LOAD_EXITING | 2246 CPU_BASED_CR8_STORE_EXITING; 2247 #endif 2248 2249 /* 2250 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed 2251 * for I/O port accesses. 
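	 * KVM never lets L2 access I/O ports directly: even when vmcs12 uses
	 * I/O bitmaps, those bitmaps are consulted in software on each exit
	 * to decide whether the exit is reflected to L1 or handled by L0.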
2252 */ 2253 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 2254 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 2255 2256 /* 2257 * This bit will be computed in nested_get_vmcs12_pages, because 2258 * we do not have access to L1's MSR bitmap yet. For now, keep 2259 * the same bit as before, hoping to avoid multiple VMWRITEs that 2260 * only set/clear this bit. 2261 */ 2262 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; 2263 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; 2264 2265 exec_controls_set(vmx, exec_control); 2266 2267 /* 2268 * SECONDARY EXEC CONTROLS 2269 */ 2270 if (cpu_has_secondary_exec_ctrls()) { 2271 exec_control = __secondary_exec_controls_get(vmcs01); 2272 2273 /* Take the following fields only from vmcs12 */ 2274 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2275 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2276 SECONDARY_EXEC_ENABLE_INVPCID | 2277 SECONDARY_EXEC_ENABLE_RDTSCP | 2278 SECONDARY_EXEC_XSAVES | 2279 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2280 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2281 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2282 SECONDARY_EXEC_ENABLE_VMFUNC | 2283 SECONDARY_EXEC_TSC_SCALING | 2284 SECONDARY_EXEC_DESC); 2285 2286 if (nested_cpu_has(vmcs12, 2287 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) 2288 exec_control |= vmcs12->secondary_vm_exec_control; 2289 2290 /* PML is emulated and never enabled in hardware for L2. */ 2291 exec_control &= ~SECONDARY_EXEC_ENABLE_PML; 2292 2293 /* VMCS shadowing for L2 is emulated for now */ 2294 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 2295 2296 /* 2297 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4() 2298 * will not have to rewrite the controls just for this bit. 2299 */ 2300 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() && 2301 (vmcs12->guest_cr4 & X86_CR4_UMIP)) 2302 exec_control |= SECONDARY_EXEC_DESC; 2303 2304 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 2305 vmcs_write16(GUEST_INTR_STATUS, 2306 vmcs12->guest_intr_status); 2307 2308 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) 2309 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 2310 2311 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING) 2312 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12); 2313 2314 secondary_exec_controls_set(vmx, exec_control); 2315 } 2316 2317 /* 2318 * ENTRY CONTROLS 2319 * 2320 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE 2321 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate 2322 * on the related bits (if supported by the CPU) in the hope that 2323 * we can avoid VMWrites during vmx_set_efer(). 2324 */ 2325 exec_control = __vm_entry_controls_get(vmcs01); 2326 exec_control |= vmcs12->vm_entry_controls; 2327 exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER); 2328 if (cpu_has_load_ia32_efer()) { 2329 if (guest_efer & EFER_LMA) 2330 exec_control |= VM_ENTRY_IA32E_MODE; 2331 if (guest_efer != host_efer) 2332 exec_control |= VM_ENTRY_LOAD_IA32_EFER; 2333 } 2334 vm_entry_controls_set(vmx, exec_control); 2335 2336 /* 2337 * EXIT CONTROLS 2338 * 2339 * L2->L1 exit controls are emulated - the hardware exit is to L0 so 2340 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER 2341 * bits may be modified by vmx_set_efer() in prepare_vmcs02(). 
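	 * vmcs12's exit controls are only consulted later, when the L2->L1
	 * VM-exit itself is emulated in nested_vmx_vmexit().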
2342 */ 2343 exec_control = __vm_exit_controls_get(vmcs01); 2344 if (cpu_has_load_ia32_efer() && guest_efer != host_efer) 2345 exec_control |= VM_EXIT_LOAD_IA32_EFER; 2346 else 2347 exec_control &= ~VM_EXIT_LOAD_IA32_EFER; 2348 vm_exit_controls_set(vmx, exec_control); 2349 2350 /* 2351 * Interrupt/Exception Fields 2352 */ 2353 if (vmx->nested.nested_run_pending) { 2354 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2355 vmcs12->vm_entry_intr_info_field); 2356 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 2357 vmcs12->vm_entry_exception_error_code); 2358 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2359 vmcs12->vm_entry_instruction_len); 2360 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 2361 vmcs12->guest_interruptibility_info); 2362 vmx->loaded_vmcs->nmi_known_unmasked = 2363 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); 2364 } else { 2365 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 2366 } 2367 } 2368 2369 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2370 { 2371 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2372 2373 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2374 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2375 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 2376 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 2377 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); 2378 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); 2379 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); 2380 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); 2381 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); 2382 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); 2383 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); 2384 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); 2385 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); 2386 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); 2387 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); 2388 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); 2389 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); 2390 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); 2391 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); 2392 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); 2393 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); 2394 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); 2395 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); 2396 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); 2397 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); 2398 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); 2399 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); 2400 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); 2401 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); 2402 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); 2403 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); 2404 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); 2405 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); 2406 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); 2407 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); 2408 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); 2409 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); 2410 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); 2411 2412 vmx->segment_cache.bitmask = 0; 2413 } 2414 2415 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2416 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { 
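		/*
		 * SYSENTER state, pending debug exceptions, PDPTRs and
		 * BNDCFGS below are all covered by the eVMCS "guest group 1"
		 * clean bit, so the VMWRITEs can be skipped when Hyper-V has
		 * marked that group clean.
		 */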
2417 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 2418 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 2419 vmcs12->guest_pending_dbg_exceptions); 2420 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); 2421 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); 2422 2423 /* 2424 * L1 may access the L2's PDPTR, so save them to construct 2425 * vmcs12 2426 */ 2427 if (enable_ept) { 2428 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2429 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2430 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2431 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2432 } 2433 2434 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && 2435 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 2436 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); 2437 } 2438 2439 if (nested_cpu_has_xsaves(vmcs12)) 2440 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); 2441 2442 /* 2443 * Whether page-faults are trapped is determined by a combination of 2444 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0 2445 * doesn't care about page faults then we should set all of these to 2446 * L1's desires. However, if L0 does care about (some) page faults, it 2447 * is not easy (if at all possible?) to merge L0 and L1's desires, we 2448 * simply ask to exit on each and every L2 page fault. This is done by 2449 * setting MASK=MATCH=0 and (see below) EB.PF=1. 2450 * Note that below we don't need special code to set EB.PF beyond the 2451 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, 2452 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when 2453 * !enable_ept, EB.PF is 1, so the "or" will always be 1. 2454 */ 2455 if (vmx_need_pf_intercept(&vmx->vcpu)) { 2456 /* 2457 * TODO: if both L0 and L1 need the same MASK and MATCH, 2458 * go ahead and use it? 2459 */ 2460 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 2461 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 2462 } else { 2463 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask); 2464 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match); 2465 } 2466 2467 if (cpu_has_vmx_apicv()) { 2468 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); 2469 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); 2470 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); 2471 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); 2472 } 2473 2474 /* 2475 * Make sure the msr_autostore list is up to date before we set the 2476 * count in the vmcs02. 2477 */ 2478 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); 2479 2480 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); 2481 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 2482 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 2483 2484 set_cr4_guest_host_mask(vmx); 2485 } 2486 2487 /* 2488 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested 2489 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it 2490 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 2491 * guest in a way that will both be appropriate to L1's requests, and our 2492 * needs. In addition to modifying the active vmcs (which is vmcs02), this 2493 * function also has additional necessary side-effects, like setting various 2494 * vcpu->arch fields. 2495 * Returns 0 on success, 1 on failure. 
Invalid state exit qualification code 2496 * is assigned to entry_failure_code on failure. 2497 */ 2498 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 2499 bool from_vmentry, 2500 enum vm_entry_failure_code *entry_failure_code) 2501 { 2502 struct vcpu_vmx *vmx = to_vmx(vcpu); 2503 bool load_guest_pdptrs_vmcs12 = false; 2504 2505 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { 2506 prepare_vmcs02_rare(vmx, vmcs12); 2507 vmx->nested.dirty_vmcs12 = false; 2508 2509 load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) || 2510 !(vmx->nested.hv_evmcs->hv_clean_fields & 2511 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1); 2512 } 2513 2514 if (vmx->nested.nested_run_pending && 2515 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { 2516 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); 2517 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); 2518 } else { 2519 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); 2520 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); 2521 } 2522 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || 2523 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) 2524 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); 2525 vmx_set_rflags(vcpu, vmcs12->guest_rflags); 2526 2527 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the 2528 * bitwise-or of what L1 wants to trap for L2, and what we want to 2529 * trap. Note that CR0.TS also needs updating - we do this later. 2530 */ 2531 vmx_update_exception_bitmap(vcpu); 2532 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; 2533 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); 2534 2535 if (vmx->nested.nested_run_pending && 2536 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { 2537 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); 2538 vcpu->arch.pat = vmcs12->guest_ia32_pat; 2539 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 2540 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 2541 } 2542 2543 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( 2544 vcpu->arch.l1_tsc_offset, 2545 vmx_get_l2_tsc_offset(vcpu), 2546 vmx_get_l2_tsc_multiplier(vcpu)); 2547 2548 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( 2549 vcpu->arch.l1_tsc_scaling_ratio, 2550 vmx_get_l2_tsc_multiplier(vcpu)); 2551 2552 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 2553 if (kvm_has_tsc_control) 2554 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); 2555 2556 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true); 2557 2558 if (nested_cpu_has_ept(vmcs12)) 2559 nested_ept_init_mmu_context(vcpu); 2560 2561 /* 2562 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those 2563 * bits which we consider mandatory enabled. 2564 * The CR0_READ_SHADOW is what L2 should have expected to read given 2565 * the specifications by L1; It's not enough to take 2566 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we 2567 * have more bits than L1 expected. 
 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
	/* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/*
	 * If guest state is invalid and unrestricted guest is disabled, L1
	 * attempted VMEntry to L2 with invalid state and the VMEntry must
	 * fail.
	 *
	 * However, when force loading the guest state (on SMM exit, or when
	 * loading nested state after migration), it is possible to have
	 * invalid guest state at this point; it will be fixed up later by
	 * restoring the L2 register state.
	 */
	if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
	}

	/* Load vmcs12->guest_cr3, backed by either EPT or shadow page tables. */
	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
				from_vmentry, entry_failure_code))
		return -EINVAL;

	/*
	 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
	 * on nested VM-Exit, which can occur without actually running L2 and
	 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2
	 * with vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will
	 * intercept the transition to HLT instead of running L2.
	 */
	if (enable_ept)
		vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);

	/* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
	if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
	    is_pae_paging(vcpu)) {
		vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
		vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
		vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
		vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
	}

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
				     vmcs12->guest_ia32_perf_global_ctrl))) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
	}

	kvm_rsp_write(vcpu, vmcs12->guest_rsp);
	kvm_rip_write(vcpu, vmcs12->guest_rip);

	/*
	 * It was observed that genuine Hyper-V running in L1 doesn't reset
	 * 'hv_clean_fields' by itself; it only sets the corresponding dirty
	 * bits when it changes a field in the eVMCS. Mark all fields as clean
	 * here.
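	 * From here on it is up to L1 to flag as dirty (i.e. clear the clean
	 * bit of) any field it modifies before the next VMLAUNCH/VMRESUME of
	 * this eVMCS.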
2636 */ 2637 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 2638 vmx->nested.hv_evmcs->hv_clean_fields |= 2639 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2640 2641 return 0; 2642 } 2643 2644 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) 2645 { 2646 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) && 2647 nested_cpu_has_virtual_nmis(vmcs12))) 2648 return -EINVAL; 2649 2650 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) && 2651 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING))) 2652 return -EINVAL; 2653 2654 return 0; 2655 } 2656 2657 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) 2658 { 2659 struct vcpu_vmx *vmx = to_vmx(vcpu); 2660 2661 /* Check for memory type validity */ 2662 switch (new_eptp & VMX_EPTP_MT_MASK) { 2663 case VMX_EPTP_MT_UC: 2664 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) 2665 return false; 2666 break; 2667 case VMX_EPTP_MT_WB: 2668 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) 2669 return false; 2670 break; 2671 default: 2672 return false; 2673 } 2674 2675 /* Page-walk levels validity. */ 2676 switch (new_eptp & VMX_EPTP_PWL_MASK) { 2677 case VMX_EPTP_PWL_5: 2678 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) 2679 return false; 2680 break; 2681 case VMX_EPTP_PWL_4: 2682 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) 2683 return false; 2684 break; 2685 default: 2686 return false; 2687 } 2688 2689 /* Reserved bits should not be set */ 2690 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) 2691 return false; 2692 2693 /* AD, if set, should be supported */ 2694 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) { 2695 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) 2696 return false; 2697 } 2698 2699 return true; 2700 } 2701 2702 /* 2703 * Checks related to VM-Execution Control Fields 2704 */ 2705 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, 2706 struct vmcs12 *vmcs12) 2707 { 2708 struct vcpu_vmx *vmx = to_vmx(vcpu); 2709 2710 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control, 2711 vmx->nested.msrs.pinbased_ctls_low, 2712 vmx->nested.msrs.pinbased_ctls_high)) || 2713 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, 2714 vmx->nested.msrs.procbased_ctls_low, 2715 vmx->nested.msrs.procbased_ctls_high))) 2716 return -EINVAL; 2717 2718 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && 2719 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control, 2720 vmx->nested.msrs.secondary_ctls_low, 2721 vmx->nested.msrs.secondary_ctls_high))) 2722 return -EINVAL; 2723 2724 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) || 2725 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || 2726 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || 2727 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || 2728 nested_vmx_check_apic_access_controls(vcpu, vmcs12) || 2729 nested_vmx_check_apicv_controls(vcpu, vmcs12) || 2730 nested_vmx_check_nmi_controls(vmcs12) || 2731 nested_vmx_check_pml_controls(vcpu, vmcs12) || 2732 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || 2733 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || 2734 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || 2735 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2736 return -EINVAL; 2737 2738 if (!nested_cpu_has_preemption_timer(vmcs12) && 2739 nested_cpu_has_save_preemption_timer(vmcs12)) 2740 return -EINVAL; 2741 2742 if (nested_cpu_has_ept(vmcs12) && 2743 CC(!nested_vmx_check_eptp(vcpu, 
vmcs12->ept_pointer))) 2744 return -EINVAL; 2745 2746 if (nested_cpu_has_vmfunc(vmcs12)) { 2747 if (CC(vmcs12->vm_function_control & 2748 ~vmx->nested.msrs.vmfunc_controls)) 2749 return -EINVAL; 2750 2751 if (nested_cpu_has_eptp_switching(vmcs12)) { 2752 if (CC(!nested_cpu_has_ept(vmcs12)) || 2753 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address))) 2754 return -EINVAL; 2755 } 2756 } 2757 2758 return 0; 2759 } 2760 2761 /* 2762 * Checks related to VM-Exit Control Fields 2763 */ 2764 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, 2765 struct vmcs12 *vmcs12) 2766 { 2767 struct vcpu_vmx *vmx = to_vmx(vcpu); 2768 2769 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls, 2770 vmx->nested.msrs.exit_ctls_low, 2771 vmx->nested.msrs.exit_ctls_high)) || 2772 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))) 2773 return -EINVAL; 2774 2775 return 0; 2776 } 2777 2778 /* 2779 * Checks related to VM-Entry Control Fields 2780 */ 2781 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, 2782 struct vmcs12 *vmcs12) 2783 { 2784 struct vcpu_vmx *vmx = to_vmx(vcpu); 2785 2786 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls, 2787 vmx->nested.msrs.entry_ctls_low, 2788 vmx->nested.msrs.entry_ctls_high))) 2789 return -EINVAL; 2790 2791 /* 2792 * From the Intel SDM, volume 3: 2793 * Fields relevant to VM-entry event injection must be set properly. 2794 * These fields are the VM-entry interruption-information field, the 2795 * VM-entry exception error code, and the VM-entry instruction length. 2796 */ 2797 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { 2798 u32 intr_info = vmcs12->vm_entry_intr_info_field; 2799 u8 vector = intr_info & INTR_INFO_VECTOR_MASK; 2800 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; 2801 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; 2802 bool should_have_error_code; 2803 bool urg = nested_cpu_has2(vmcs12, 2804 SECONDARY_EXEC_UNRESTRICTED_GUEST); 2805 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; 2806 2807 /* VM-entry interruption-info field: interruption type */ 2808 if (CC(intr_type == INTR_TYPE_RESERVED) || 2809 CC(intr_type == INTR_TYPE_OTHER_EVENT && 2810 !nested_cpu_supports_monitor_trap_flag(vcpu))) 2811 return -EINVAL; 2812 2813 /* VM-entry interruption-info field: vector */ 2814 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || 2815 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || 2816 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) 2817 return -EINVAL; 2818 2819 /* VM-entry interruption-info field: deliver error code */ 2820 should_have_error_code = 2821 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && 2822 x86_exception_has_error_code(vector); 2823 if (CC(has_error_code != should_have_error_code)) 2824 return -EINVAL; 2825 2826 /* VM-entry exception error code */ 2827 if (CC(has_error_code && 2828 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))) 2829 return -EINVAL; 2830 2831 /* VM-entry interruption-info field: reserved bits */ 2832 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK)) 2833 return -EINVAL; 2834 2835 /* VM-entry instruction length */ 2836 switch (intr_type) { 2837 case INTR_TYPE_SOFT_EXCEPTION: 2838 case INTR_TYPE_SOFT_INTR: 2839 case INTR_TYPE_PRIV_SW_EXCEPTION: 2840 if (CC(vmcs12->vm_entry_instruction_len > 15) || 2841 CC(vmcs12->vm_entry_instruction_len == 0 && 2842 CC(!nested_cpu_has_zero_length_injection(vcpu)))) 2843 return -EINVAL; 2844 } 2845 } 2846 2847 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) 
2848 return -EINVAL; 2849 2850 return 0; 2851 } 2852 2853 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, 2854 struct vmcs12 *vmcs12) 2855 { 2856 if (nested_check_vm_execution_controls(vcpu, vmcs12) || 2857 nested_check_vm_exit_controls(vcpu, vmcs12) || 2858 nested_check_vm_entry_controls(vcpu, vmcs12)) 2859 return -EINVAL; 2860 2861 if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled) 2862 return nested_evmcs_check_controls(vmcs12); 2863 2864 return 0; 2865 } 2866 2867 static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu, 2868 struct vmcs12 *vmcs12) 2869 { 2870 #ifdef CONFIG_X86_64 2871 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != 2872 !!(vcpu->arch.efer & EFER_LMA))) 2873 return -EINVAL; 2874 #endif 2875 return 0; 2876 } 2877 2878 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, 2879 struct vmcs12 *vmcs12) 2880 { 2881 bool ia32e; 2882 2883 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || 2884 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || 2885 CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3))) 2886 return -EINVAL; 2887 2888 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || 2889 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))) 2890 return -EINVAL; 2891 2892 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && 2893 CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) 2894 return -EINVAL; 2895 2896 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && 2897 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 2898 vmcs12->host_ia32_perf_global_ctrl))) 2899 return -EINVAL; 2900 2901 #ifdef CONFIG_X86_64 2902 ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); 2903 #else 2904 ia32e = false; 2905 #endif 2906 2907 if (ia32e) { 2908 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) 2909 return -EINVAL; 2910 } else { 2911 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || 2912 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || 2913 CC((vmcs12->host_rip) >> 32)) 2914 return -EINVAL; 2915 } 2916 2917 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2918 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2919 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2920 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2921 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2922 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2923 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2924 CC(vmcs12->host_cs_selector == 0) || 2925 CC(vmcs12->host_tr_selector == 0) || 2926 CC(vmcs12->host_ss_selector == 0 && !ia32e)) 2927 return -EINVAL; 2928 2929 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) || 2930 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) || 2931 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) || 2932 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) || 2933 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) || 2934 CC(is_noncanonical_address(vmcs12->host_rip, vcpu))) 2935 return -EINVAL; 2936 2937 /* 2938 * If the load IA32_EFER VM-exit control is 1, bits reserved in the 2939 * IA32_EFER MSR must be 0 in the field for that register. In addition, 2940 * the values of the LMA and LME bits in the field must each be that of 2941 * the host address-space size VM-exit control. 
2942 */ 2943 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { 2944 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) || 2945 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) || 2946 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))) 2947 return -EINVAL; 2948 } 2949 2950 return 0; 2951 } 2952 2953 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, 2954 struct vmcs12 *vmcs12) 2955 { 2956 struct vcpu_vmx *vmx = to_vmx(vcpu); 2957 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; 2958 struct vmcs_hdr hdr; 2959 2960 if (vmcs12->vmcs_link_pointer == INVALID_GPA) 2961 return 0; 2962 2963 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) 2964 return -EINVAL; 2965 2966 if (ghc->gpa != vmcs12->vmcs_link_pointer && 2967 CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, 2968 vmcs12->vmcs_link_pointer, VMCS12_SIZE))) 2969 return -EINVAL; 2970 2971 if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, 2972 offsetof(struct vmcs12, hdr), 2973 sizeof(hdr)))) 2974 return -EINVAL; 2975 2976 if (CC(hdr.revision_id != VMCS12_REVISION) || 2977 CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))) 2978 return -EINVAL; 2979 2980 return 0; 2981 } 2982 2983 /* 2984 * Checks related to Guest Non-register State 2985 */ 2986 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) 2987 { 2988 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && 2989 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT && 2990 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI)) 2991 return -EINVAL; 2992 2993 return 0; 2994 } 2995 2996 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, 2997 struct vmcs12 *vmcs12, 2998 enum vm_entry_failure_code *entry_failure_code) 2999 { 3000 bool ia32e; 3001 3002 *entry_failure_code = ENTRY_FAIL_DEFAULT; 3003 3004 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) || 3005 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))) 3006 return -EINVAL; 3007 3008 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && 3009 CC(!kvm_dr7_valid(vmcs12->guest_dr7))) 3010 return -EINVAL; 3011 3012 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && 3013 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) 3014 return -EINVAL; 3015 3016 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { 3017 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR; 3018 return -EINVAL; 3019 } 3020 3021 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 3022 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 3023 vmcs12->guest_ia32_perf_global_ctrl))) 3024 return -EINVAL; 3025 3026 /* 3027 * If the load IA32_EFER VM-entry control is 1, the following checks 3028 * are performed on the field for the IA32_EFER MSR: 3029 * - Bits reserved in the IA32_EFER MSR must be 0. 3030 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of 3031 * the IA-32e mode guest VM-exit control. It must also be identical 3032 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to 3033 * CR0.PG) is 1. 
3034 */ 3035 if (to_vmx(vcpu)->nested.nested_run_pending && 3036 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { 3037 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; 3038 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || 3039 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || 3040 CC(((vmcs12->guest_cr0 & X86_CR0_PG) && 3041 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))) 3042 return -EINVAL; 3043 } 3044 3045 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && 3046 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) || 3047 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))) 3048 return -EINVAL; 3049 3050 if (nested_check_guest_non_reg_state(vmcs12)) 3051 return -EINVAL; 3052 3053 return 0; 3054 } 3055 3056 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) 3057 { 3058 struct vcpu_vmx *vmx = to_vmx(vcpu); 3059 unsigned long cr4; 3060 bool vm_fail; 3061 3062 if (!nested_early_check) 3063 return 0; 3064 3065 if (vmx->msr_autoload.host.nr) 3066 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 3067 if (vmx->msr_autoload.guest.nr) 3068 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 3069 3070 preempt_disable(); 3071 3072 vmx_prepare_switch_to_guest(vcpu); 3073 3074 /* 3075 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, 3076 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to 3077 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. 3078 * there is no need to preserve other bits or save/restore the field. 3079 */ 3080 vmcs_writel(GUEST_RFLAGS, 0); 3081 3082 cr4 = cr4_read_shadow(); 3083 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 3084 vmcs_writel(HOST_CR4, cr4); 3085 vmx->loaded_vmcs->host_state.cr4 = cr4; 3086 } 3087 3088 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 3089 vmx->loaded_vmcs->launched); 3090 3091 if (vmx->msr_autoload.host.nr) 3092 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 3093 if (vmx->msr_autoload.guest.nr) 3094 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 3095 3096 if (vm_fail) { 3097 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR); 3098 3099 preempt_enable(); 3100 3101 trace_kvm_nested_vmenter_failed( 3102 "early hardware check VM-instruction error: ", error); 3103 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3104 return 1; 3105 } 3106 3107 /* 3108 * VMExit clears RFLAGS.IF and DR7, even on a consistency check. 3109 */ 3110 if (hw_breakpoint_active()) 3111 set_debugreg(__this_cpu_read(cpu_dr7), 7); 3112 local_irq_enable(); 3113 preempt_enable(); 3114 3115 /* 3116 * A non-failing VMEntry means we somehow entered guest mode with 3117 * an illegal RIP, and that's just the tip of the iceberg. There 3118 * is no telling what memory has been modified or what state has 3119 * been exposed to unknown code. Hitting this all but guarantees 3120 * a (very critical) hardware issue. 3121 */ 3122 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & 3123 VMX_EXIT_REASONS_FAILED_VMENTRY)); 3124 3125 return 0; 3126 } 3127 3128 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu) 3129 { 3130 struct vcpu_vmx *vmx = to_vmx(vcpu); 3131 3132 /* 3133 * hv_evmcs may end up being not mapped after migration (when 3134 * L2 was running), map it here to make sure vmcs12 changes are 3135 * properly reflected. 
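	 * In that case the pointer is left as EVMPTR_MAP_PENDING by the
	 * nested state restore path and the actual mapping is deferred until
	 * KVM_REQ_GET_NESTED_STATE_PAGES is serviced (see
	 * vmx_get_nested_state_pages() below).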
3136 */ 3137 if (vmx->nested.enlightened_vmcs_enabled && 3138 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) { 3139 enum nested_evmptrld_status evmptrld_status = 3140 nested_vmx_handle_enlightened_vmptrld(vcpu, false); 3141 3142 if (evmptrld_status == EVMPTRLD_VMFAIL || 3143 evmptrld_status == EVMPTRLD_ERROR) 3144 return false; 3145 3146 /* 3147 * Post migration VMCS12 always provides the most actual 3148 * information, copy it to eVMCS upon entry. 3149 */ 3150 vmx->nested.need_vmcs12_to_shadow_sync = true; 3151 } 3152 3153 return true; 3154 } 3155 3156 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) 3157 { 3158 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3159 struct vcpu_vmx *vmx = to_vmx(vcpu); 3160 struct kvm_host_map *map; 3161 struct page *page; 3162 u64 hpa; 3163 3164 if (!vcpu->arch.pdptrs_from_userspace && 3165 !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 3166 /* 3167 * Reload the guest's PDPTRs since after a migration 3168 * the guest CR3 might be restored prior to setting the nested 3169 * state which can lead to a load of wrong PDPTRs. 3170 */ 3171 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3))) 3172 return false; 3173 } 3174 3175 3176 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3177 /* 3178 * Translate L1 physical address to host physical 3179 * address for vmcs02. Keep the page pinned, so this 3180 * physical address remains valid. We keep a reference 3181 * to it so we can release it later. 3182 */ 3183 if (vmx->nested.apic_access_page) { /* shouldn't happen */ 3184 kvm_release_page_clean(vmx->nested.apic_access_page); 3185 vmx->nested.apic_access_page = NULL; 3186 } 3187 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); 3188 if (!is_error_page(page)) { 3189 vmx->nested.apic_access_page = page; 3190 hpa = page_to_phys(vmx->nested.apic_access_page); 3191 vmcs_write64(APIC_ACCESS_ADDR, hpa); 3192 } else { 3193 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n", 3194 __func__); 3195 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3196 vcpu->run->internal.suberror = 3197 KVM_INTERNAL_ERROR_EMULATION; 3198 vcpu->run->internal.ndata = 0; 3199 return false; 3200 } 3201 } 3202 3203 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3204 map = &vmx->nested.virtual_apic_map; 3205 3206 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { 3207 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); 3208 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) && 3209 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) && 3210 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3211 /* 3212 * The processor will never use the TPR shadow, simply 3213 * clear the bit from the execution control. Such a 3214 * configuration is useless, but it happens in tests. 3215 * For any other configuration, failing the vm entry is 3216 * _not_ what the processor does but it's basically the 3217 * only possibility we have. 3218 */ 3219 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); 3220 } else { 3221 /* 3222 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to 3223 * force VM-Entry to fail. 
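			 * With the TPR shadow enabled, hardware validates the
			 * virtual-APIC address at VM-Entry, so pointing it at
			 * INVALID_GPA guarantees the entry of vmcs02 fails
			 * rather than running L2 with a stale page.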
3224 */ 3225 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, INVALID_GPA); 3226 } 3227 } 3228 3229 if (nested_cpu_has_posted_intr(vmcs12)) { 3230 map = &vmx->nested.pi_desc_map; 3231 3232 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { 3233 vmx->nested.pi_desc = 3234 (struct pi_desc *)(((void *)map->hva) + 3235 offset_in_page(vmcs12->posted_intr_desc_addr)); 3236 vmcs_write64(POSTED_INTR_DESC_ADDR, 3237 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); 3238 } else { 3239 /* 3240 * Defer the KVM_INTERNAL_EXIT until KVM tries to 3241 * access the contents of the VMCS12 posted interrupt 3242 * descriptor. (Note that KVM may do this when it 3243 * should not, per the architectural specification.) 3244 */ 3245 vmx->nested.pi_desc = NULL; 3246 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR); 3247 } 3248 } 3249 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) 3250 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3251 else 3252 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3253 3254 return true; 3255 } 3256 3257 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) 3258 { 3259 if (!nested_get_evmcs_page(vcpu)) { 3260 pr_debug_ratelimited("%s: enlightened vmptrld failed\n", 3261 __func__); 3262 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3263 vcpu->run->internal.suberror = 3264 KVM_INTERNAL_ERROR_EMULATION; 3265 vcpu->run->internal.ndata = 0; 3266 3267 return false; 3268 } 3269 3270 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu)) 3271 return false; 3272 3273 return true; 3274 } 3275 3276 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) 3277 { 3278 struct vmcs12 *vmcs12; 3279 struct vcpu_vmx *vmx = to_vmx(vcpu); 3280 gpa_t dst; 3281 3282 if (WARN_ON_ONCE(!is_guest_mode(vcpu))) 3283 return 0; 3284 3285 if (WARN_ON_ONCE(vmx->nested.pml_full)) 3286 return 1; 3287 3288 /* 3289 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is 3290 * set is already checked as part of A/D emulation. 3291 */ 3292 vmcs12 = get_vmcs12(vcpu); 3293 if (!nested_cpu_has_pml(vmcs12)) 3294 return 0; 3295 3296 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { 3297 vmx->nested.pml_full = true; 3298 return 1; 3299 } 3300 3301 gpa &= ~0xFFFull; 3302 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; 3303 3304 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, 3305 offset_in_page(dst), sizeof(gpa))) 3306 return 0; 3307 3308 vmcs12->guest_pml_index--; 3309 3310 return 0; 3311 } 3312 3313 /* 3314 * Intel's VMX Instruction Reference specifies a common set of prerequisites 3315 * for running VMX instructions (except VMXON, whose prerequisites are 3316 * slightly different). It also specifies what exception to inject otherwise. 3317 * Note that many of these exceptions have priority over VM exits, so they 3318 * don't have to be checked again here. 
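 * For example, the #UD for attempting a VMX instruction in real or
 * virtual-8086 mode is delivered directly to the guest by hardware, which is
 * why only the "in VMX operation" (#UD) and CPL (#GP) checks remain below.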
3319 */ 3320 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) 3321 { 3322 if (!to_vmx(vcpu)->nested.vmxon) { 3323 kvm_queue_exception(vcpu, UD_VECTOR); 3324 return 0; 3325 } 3326 3327 if (vmx_get_cpl(vcpu)) { 3328 kvm_inject_gp(vcpu, 0); 3329 return 0; 3330 } 3331 3332 return 1; 3333 } 3334 3335 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) 3336 { 3337 u8 rvi = vmx_get_rvi(); 3338 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI); 3339 3340 return ((rvi & 0xf0) > (vppr & 0xf0)); 3341 } 3342 3343 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 3344 struct vmcs12 *vmcs12); 3345 3346 /* 3347 * If from_vmentry is false, this is being called from state restore (either RSM 3348 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. 3349 * 3350 * Returns: 3351 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode 3352 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail 3353 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit 3354 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error 3355 */ 3356 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, 3357 bool from_vmentry) 3358 { 3359 struct vcpu_vmx *vmx = to_vmx(vcpu); 3360 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3361 enum vm_entry_failure_code entry_failure_code; 3362 bool evaluate_pending_interrupts; 3363 union vmx_exit_reason exit_reason = { 3364 .basic = EXIT_REASON_INVALID_STATE, 3365 .failed_vmentry = 1, 3366 }; 3367 u32 failed_index; 3368 3369 kvm_service_local_tlb_flush_requests(vcpu); 3370 3371 evaluate_pending_interrupts = exec_controls_get(vmx) & 3372 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING); 3373 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) 3374 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); 3375 3376 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 3377 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 3378 if (kvm_mpx_supported() && 3379 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 3380 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3381 3382 /* 3383 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and* 3384 * nested early checks are disabled. In the event of a "late" VM-Fail, 3385 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its 3386 * software model to the pre-VMEntry host state. When EPT is disabled, 3387 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes 3388 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing 3389 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to 3390 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested 3391 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is 3392 * guaranteed to be overwritten with a shadow CR3 prior to re-entering 3393 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as 3394 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks 3395 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail 3396 * path would need to manually save/restore vmcs01.GUEST_CR3. 
3397 */ 3398 if (!enable_ept && !nested_early_check) 3399 vmcs_writel(GUEST_CR3, vcpu->arch.cr3); 3400 3401 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); 3402 3403 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12); 3404 3405 if (from_vmentry) { 3406 if (unlikely(!nested_get_vmcs12_pages(vcpu))) { 3407 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3408 return NVMX_VMENTRY_KVM_INTERNAL_ERROR; 3409 } 3410 3411 if (nested_vmx_check_vmentry_hw(vcpu)) { 3412 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3413 return NVMX_VMENTRY_VMFAIL; 3414 } 3415 3416 if (nested_vmx_check_guest_state(vcpu, vmcs12, 3417 &entry_failure_code)) { 3418 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3419 vmcs12->exit_qualification = entry_failure_code; 3420 goto vmentry_fail_vmexit; 3421 } 3422 } 3423 3424 enter_guest_mode(vcpu); 3425 3426 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) { 3427 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3428 vmcs12->exit_qualification = entry_failure_code; 3429 goto vmentry_fail_vmexit_guest_mode; 3430 } 3431 3432 if (from_vmentry) { 3433 failed_index = nested_vmx_load_msr(vcpu, 3434 vmcs12->vm_entry_msr_load_addr, 3435 vmcs12->vm_entry_msr_load_count); 3436 if (failed_index) { 3437 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL; 3438 vmcs12->exit_qualification = failed_index; 3439 goto vmentry_fail_vmexit_guest_mode; 3440 } 3441 } else { 3442 /* 3443 * The MMU is not initialized to point at the right entities yet and 3444 * "get pages" would need to read data from the guest (i.e. we will 3445 * need to perform gpa to hpa translation). Request a call 3446 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs 3447 * have already been set at vmentry time and should not be reset. 3448 */ 3449 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 3450 } 3451 3452 /* 3453 * If L1 had a pending IRQ/NMI until it executed 3454 * VMLAUNCH/VMRESUME which wasn't delivered because it was 3455 * disallowed (e.g. interrupts disabled), L0 needs to 3456 * evaluate if this pending event should cause an exit from L2 3457 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't 3458 * intercept EXTERNAL_INTERRUPT). 3459 * 3460 * Usually this would be handled by the processor noticing an 3461 * IRQ/NMI window request, or checking RVI during evaluation of 3462 * pending virtual interrupts. However, this setting was done 3463 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0 3464 * to perform pending event evaluation by requesting a KVM_REQ_EVENT. 3465 */ 3466 if (unlikely(evaluate_pending_interrupts)) 3467 kvm_make_request(KVM_REQ_EVENT, vcpu); 3468 3469 /* 3470 * Do not start the preemption timer hrtimer until after we know 3471 * we are successful, so that only nested_vmx_vmexit needs to cancel 3472 * the timer. 3473 */ 3474 vmx->nested.preemption_timer_expired = false; 3475 if (nested_cpu_has_preemption_timer(vmcs12)) { 3476 u64 timer_value = vmx_calc_preemption_timer_value(vcpu); 3477 vmx_start_preemption_timer(vcpu, timer_value); 3478 } 3479 3480 /* 3481 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point 3482 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet 3483 * returned as far as L1 is concerned. It will only return (and set 3484 * the success flag) when L2 exits (see nested_vmx_vmexit()).
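 *
 * (The success indication on that later exit comes for free:
 * load_vmcs12_host_state() sets RFLAGS to X86_EFLAGS_FIXED, which
 * leaves CF and ZF clear, i.e. the VMsucceed convention.)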
3485 */ 3486 return NVMX_VMENTRY_SUCCESS; 3487 3488 /* 3489 * A failed consistency check that leads to a VMExit during L1's 3490 * VMEnter to L2 is a variation of a normal VMexit, as explained in 3491 * 26.7 "VM-entry failures during or after loading guest state". 3492 */ 3493 vmentry_fail_vmexit_guest_mode: 3494 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 3495 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3496 leave_guest_mode(vcpu); 3497 3498 vmentry_fail_vmexit: 3499 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3500 3501 if (!from_vmentry) 3502 return NVMX_VMENTRY_VMEXIT; 3503 3504 load_vmcs12_host_state(vcpu, vmcs12); 3505 vmcs12->vm_exit_reason = exit_reason.full; 3506 if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 3507 vmx->nested.need_vmcs12_to_shadow_sync = true; 3508 return NVMX_VMENTRY_VMEXIT; 3509 } 3510 3511 /* 3512 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 3513 * for running an L2 nested guest. 3514 */ 3515 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) 3516 { 3517 struct vmcs12 *vmcs12; 3518 enum nvmx_vmentry_status status; 3519 struct vcpu_vmx *vmx = to_vmx(vcpu); 3520 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); 3521 enum nested_evmptrld_status evmptrld_status; 3522 3523 if (!nested_vmx_check_permission(vcpu)) 3524 return 1; 3525 3526 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch); 3527 if (evmptrld_status == EVMPTRLD_ERROR) { 3528 kvm_queue_exception(vcpu, UD_VECTOR); 3529 return 1; 3530 } 3531 3532 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 3533 3534 if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) 3535 return nested_vmx_failInvalid(vcpu); 3536 3537 if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) && 3538 vmx->nested.current_vmptr == INVALID_GPA)) 3539 return nested_vmx_failInvalid(vcpu); 3540 3541 vmcs12 = get_vmcs12(vcpu); 3542 3543 /* 3544 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact 3545 * that there *is* a valid VMCS pointer, RFLAGS.CF is set 3546 * rather than RFLAGS.ZF, and no error number is stored to the 3547 * VM-instruction error field. 3548 */ 3549 if (CC(vmcs12->hdr.shadow_vmcs)) 3550 return nested_vmx_failInvalid(vcpu); 3551 3552 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { 3553 copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields); 3554 /* Enlightened VMCS doesn't have launch state */ 3555 vmcs12->launch_state = !launch; 3556 } else if (enable_shadow_vmcs) { 3557 copy_shadow_to_vmcs12(vmx); 3558 } 3559 3560 /* 3561 * The nested entry process starts with enforcing various prerequisites 3562 * on vmcs12 as required by the Intel SDM, and act appropriately when 3563 * they fail: As the SDM explains, some conditions should cause the 3564 * instruction to fail, while others will cause the instruction to seem 3565 * to succeed, but return an EXIT_REASON_INVALID_STATE. 3566 * To speed up the normal (success) code path, we should avoid checking 3567 * for misconfigurations which will anyway be caught by the processor 3568 * when using the merged vmcs02. 3569 */ 3570 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)) 3571 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); 3572 3573 if (CC(vmcs12->launch_state == launch)) 3574 return nested_vmx_fail(vcpu, 3575 launch ? 
VMXERR_VMLAUNCH_NONCLEAR_VMCS 3576 : VMXERR_VMRESUME_NONLAUNCHED_VMCS); 3577 3578 if (nested_vmx_check_controls(vcpu, vmcs12)) 3579 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3580 3581 if (nested_vmx_check_address_space_size(vcpu, vmcs12)) 3582 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3583 3584 if (nested_vmx_check_host_state(vcpu, vmcs12)) 3585 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3586 3587 /* 3588 * We're finally done with prerequisite checking, and can start with 3589 * the nested entry. 3590 */ 3591 vmx->nested.nested_run_pending = 1; 3592 vmx->nested.has_preemption_timer_deadline = false; 3593 status = nested_vmx_enter_non_root_mode(vcpu, true); 3594 if (unlikely(status != NVMX_VMENTRY_SUCCESS)) 3595 goto vmentry_failed; 3596 3597 /* Emulate processing of posted interrupts on VM-Enter. */ 3598 if (nested_cpu_has_posted_intr(vmcs12) && 3599 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { 3600 vmx->nested.pi_pending = true; 3601 kvm_make_request(KVM_REQ_EVENT, vcpu); 3602 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); 3603 } 3604 3605 /* Hide L1D cache contents from the nested guest. */ 3606 vmx->vcpu.arch.l1tf_flush_l1d = true; 3607 3608 /* 3609 * Must happen outside of nested_vmx_enter_non_root_mode() as it will 3610 * also be used as part of restoring nVMX state for 3611 * snapshot restore (migration). 3612 * 3613 * In this flow, it is assumed that vmcs12 cache was 3614 * transferred as part of captured nVMX state and should 3615 * therefore not be read from guest memory (which may not 3616 * exist on destination host yet). 3617 */ 3618 nested_cache_shadow_vmcs12(vcpu, vmcs12); 3619 3620 switch (vmcs12->guest_activity_state) { 3621 case GUEST_ACTIVITY_HLT: 3622 /* 3623 * If we're entering a halted L2 vcpu and the L2 vcpu won't be 3624 * awakened by event injection or by an NMI-window VM-exit or 3625 * by an interrupt-window VM-exit, halt the vcpu. 3626 */ 3627 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && 3628 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) && 3629 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) && 3630 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { 3631 vmx->nested.nested_run_pending = 0; 3632 return kvm_emulate_halt_noskip(vcpu); 3633 } 3634 break; 3635 case GUEST_ACTIVITY_WAIT_SIPI: 3636 vmx->nested.nested_run_pending = 0; 3637 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 3638 break; 3639 default: 3640 break; 3641 } 3642 3643 return 1; 3644 3645 vmentry_failed: 3646 vmx->nested.nested_run_pending = 0; 3647 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR) 3648 return 0; 3649 if (status == NVMX_VMENTRY_VMEXIT) 3650 return 1; 3651 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL); 3652 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3653 } 3654 3655 /* 3656 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date 3657 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). 3658 * This function returns the new value we should put in vmcs12.guest_cr0. 3659 * It's not enough to just return the vmcs02 GUEST_CR0. Rather, 3660 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now 3661 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 3662 * didn't trap the bit, because if L1 did, so would L0). 3663 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have 3664 * been modified by L2, and L1 knows it. 
So just leave the old value of 3665 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 3666 * isn't relevant, because if L0 traps this bit it can set it to anything. 3667 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have 3668 * changed these bits, and therefore they need to be updated, but L0 3669 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather 3670 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. 3671 */ 3672 static inline unsigned long 3673 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3674 { 3675 return 3676 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | 3677 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | 3678 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | 3679 vcpu->arch.cr0_guest_owned_bits)); 3680 } 3681 3682 static inline unsigned long 3683 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3684 { 3685 return 3686 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | 3687 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | 3688 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | 3689 vcpu->arch.cr4_guest_owned_bits)); 3690 } 3691 3692 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, 3693 struct vmcs12 *vmcs12) 3694 { 3695 u32 idt_vectoring; 3696 unsigned int nr; 3697 3698 if (vcpu->arch.exception.injected) { 3699 nr = vcpu->arch.exception.nr; 3700 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3701 3702 if (kvm_exception_is_soft(nr)) { 3703 vmcs12->vm_exit_instruction_len = 3704 vcpu->arch.event_exit_inst_len; 3705 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; 3706 } else 3707 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; 3708 3709 if (vcpu->arch.exception.has_error_code) { 3710 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; 3711 vmcs12->idt_vectoring_error_code = 3712 vcpu->arch.exception.error_code; 3713 } 3714 3715 vmcs12->idt_vectoring_info_field = idt_vectoring; 3716 } else if (vcpu->arch.nmi_injected) { 3717 vmcs12->idt_vectoring_info_field = 3718 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; 3719 } else if (vcpu->arch.interrupt.injected) { 3720 nr = vcpu->arch.interrupt.nr; 3721 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3722 3723 if (vcpu->arch.interrupt.soft) { 3724 idt_vectoring |= INTR_TYPE_SOFT_INTR; 3725 vmcs12->vm_entry_instruction_len = 3726 vcpu->arch.event_exit_inst_len; 3727 } else 3728 idt_vectoring |= INTR_TYPE_EXT_INTR; 3729 3730 vmcs12->idt_vectoring_info_field = idt_vectoring; 3731 } 3732 } 3733 3734 3735 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) 3736 { 3737 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3738 gfn_t gfn; 3739 3740 /* 3741 * Don't need to mark the APIC access page dirty; it is never 3742 * written to by the CPU during APIC virtualization. 
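 *
 * The virtual-APIC page and the posted-interrupt descriptor, on the
 * other hand, can be written by the CPU while L2 runs, so they are
 * marked dirty below.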
3743 */ 3744 3745 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3746 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; 3747 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3748 } 3749 3750 if (nested_cpu_has_posted_intr(vmcs12)) { 3751 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; 3752 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3753 } 3754 } 3755 3756 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 3757 { 3758 struct vcpu_vmx *vmx = to_vmx(vcpu); 3759 int max_irr; 3760 void *vapic_page; 3761 u16 status; 3762 3763 if (!vmx->nested.pi_pending) 3764 return 0; 3765 3766 if (!vmx->nested.pi_desc) 3767 goto mmio_needed; 3768 3769 vmx->nested.pi_pending = false; 3770 3771 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 3772 return 0; 3773 3774 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); 3775 if (max_irr != 256) { 3776 vapic_page = vmx->nested.virtual_apic_map.hva; 3777 if (!vapic_page) 3778 goto mmio_needed; 3779 3780 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, 3781 vapic_page, &max_irr); 3782 status = vmcs_read16(GUEST_INTR_STATUS); 3783 if ((u8)max_irr > ((u8)status & 0xff)) { 3784 status &= ~0xff; 3785 status |= (u8)max_irr; 3786 vmcs_write16(GUEST_INTR_STATUS, status); 3787 } 3788 } 3789 3790 nested_mark_vmcs12_pages_dirty(vcpu); 3791 return 0; 3792 3793 mmio_needed: 3794 kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL); 3795 return -ENXIO; 3796 } 3797 3798 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, 3799 unsigned long exit_qual) 3800 { 3801 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3802 unsigned int nr = vcpu->arch.exception.nr; 3803 u32 intr_info = nr | INTR_INFO_VALID_MASK; 3804 3805 if (vcpu->arch.exception.has_error_code) { 3806 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; 3807 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 3808 } 3809 3810 if (kvm_exception_is_soft(nr)) 3811 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 3812 else 3813 intr_info |= INTR_TYPE_HARD_EXCEPTION; 3814 3815 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && 3816 vmx_get_nmi_mask(vcpu)) 3817 intr_info |= INTR_INFO_UNBLOCK_NMI; 3818 3819 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); 3820 } 3821 3822 /* 3823 * Returns true if a debug trap is pending delivery. 3824 * 3825 * In KVM, debug traps bear an exception payload. As such, the class of a #DB 3826 * exception may be inferred from the presence of an exception payload. 3827 */ 3828 static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu) 3829 { 3830 return vcpu->arch.exception.pending && 3831 vcpu->arch.exception.nr == DB_VECTOR && 3832 vcpu->arch.exception.payload; 3833 } 3834 3835 /* 3836 * Certain VM-exits set the 'pending debug exceptions' field to indicate a 3837 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM 3838 * represents these debug traps with a payload that is said to be compatible 3839 * with the 'pending debug exceptions' field, write the payload to the VMCS 3840 * field if a VM-exit is delivered before the debug trap. 
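 *
 * E.g. a pending single-step trap is carried as a payload with BS
 * (bit 14) set, which maps onto the same bit position in
 * GUEST_PENDING_DBG_EXCEPTIONS.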
3841 */ 3842 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu) 3843 { 3844 if (vmx_pending_dbg_trap(vcpu)) 3845 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 3846 vcpu->arch.exception.payload); 3847 } 3848 3849 static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu) 3850 { 3851 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && 3852 to_vmx(vcpu)->nested.preemption_timer_expired; 3853 } 3854 3855 static int vmx_check_nested_events(struct kvm_vcpu *vcpu) 3856 { 3857 struct vcpu_vmx *vmx = to_vmx(vcpu); 3858 unsigned long exit_qual; 3859 bool block_nested_events = 3860 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); 3861 bool mtf_pending = vmx->nested.mtf_pending; 3862 struct kvm_lapic *apic = vcpu->arch.apic; 3863 3864 /* 3865 * Clear the MTF state. If a higher priority VM-exit is delivered first, 3866 * this state is discarded. 3867 */ 3868 if (!block_nested_events) 3869 vmx->nested.mtf_pending = false; 3870 3871 if (lapic_in_kernel(vcpu) && 3872 test_bit(KVM_APIC_INIT, &apic->pending_events)) { 3873 if (block_nested_events) 3874 return -EBUSY; 3875 nested_vmx_update_pending_dbg(vcpu); 3876 clear_bit(KVM_APIC_INIT, &apic->pending_events); 3877 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED) 3878 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); 3879 return 0; 3880 } 3881 3882 if (lapic_in_kernel(vcpu) && 3883 test_bit(KVM_APIC_SIPI, &apic->pending_events)) { 3884 if (block_nested_events) 3885 return -EBUSY; 3886 3887 clear_bit(KVM_APIC_SIPI, &apic->pending_events); 3888 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 3889 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0, 3890 apic->sipi_vector & 0xFFUL); 3891 return 0; 3892 } 3893 3894 /* 3895 * Process any exceptions that are not debug traps before MTF. 3896 * 3897 * Note that only a pending nested run can block a pending exception. 3898 * Otherwise an injected NMI/interrupt should either be 3899 * lost or delivered to the nested hypervisor in the IDT_VECTORING_INFO, 3900 * while delivering the pending exception. 
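 *
 * The rough priority order implemented below is: INIT/SIPI (handled
 * above), non-trap exceptions, MTF, trap-like #DB, the VMX preemption
 * timer, SMI, NMI and finally external interrupts.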
3901 */ 3902 3903 if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) { 3904 if (vmx->nested.nested_run_pending) 3905 return -EBUSY; 3906 if (!nested_vmx_check_exception(vcpu, &exit_qual)) 3907 goto no_vmexit; 3908 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3909 return 0; 3910 } 3911 3912 if (mtf_pending) { 3913 if (block_nested_events) 3914 return -EBUSY; 3915 nested_vmx_update_pending_dbg(vcpu); 3916 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); 3917 return 0; 3918 } 3919 3920 if (vcpu->arch.exception.pending) { 3921 if (vmx->nested.nested_run_pending) 3922 return -EBUSY; 3923 if (!nested_vmx_check_exception(vcpu, &exit_qual)) 3924 goto no_vmexit; 3925 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3926 return 0; 3927 } 3928 3929 if (nested_vmx_preemption_timer_pending(vcpu)) { 3930 if (block_nested_events) 3931 return -EBUSY; 3932 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); 3933 return 0; 3934 } 3935 3936 if (vcpu->arch.smi_pending && !is_smm(vcpu)) { 3937 if (block_nested_events) 3938 return -EBUSY; 3939 goto no_vmexit; 3940 } 3941 3942 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) { 3943 if (block_nested_events) 3944 return -EBUSY; 3945 if (!nested_exit_on_nmi(vcpu)) 3946 goto no_vmexit; 3947 3948 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 3949 NMI_VECTOR | INTR_TYPE_NMI_INTR | 3950 INTR_INFO_VALID_MASK, 0); 3951 /* 3952 * The NMI-triggered VM exit counts as injection: 3953 * clear this one and block further NMIs. 3954 */ 3955 vcpu->arch.nmi_pending = 0; 3956 vmx_set_nmi_mask(vcpu, true); 3957 return 0; 3958 } 3959 3960 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) { 3961 if (block_nested_events) 3962 return -EBUSY; 3963 if (!nested_exit_on_intr(vcpu)) 3964 goto no_vmexit; 3965 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); 3966 return 0; 3967 } 3968 3969 no_vmexit: 3970 return vmx_complete_nested_posted_interrupt(vcpu); 3971 } 3972 3973 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 3974 { 3975 ktime_t remaining = 3976 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); 3977 u64 value; 3978 3979 if (ktime_to_ns(remaining) <= 0) 3980 return 0; 3981 3982 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; 3983 do_div(value, 1000000); 3984 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 3985 } 3986 3987 static bool is_vmcs12_ext_field(unsigned long field) 3988 { 3989 switch (field) { 3990 case GUEST_ES_SELECTOR: 3991 case GUEST_CS_SELECTOR: 3992 case GUEST_SS_SELECTOR: 3993 case GUEST_DS_SELECTOR: 3994 case GUEST_FS_SELECTOR: 3995 case GUEST_GS_SELECTOR: 3996 case GUEST_LDTR_SELECTOR: 3997 case GUEST_TR_SELECTOR: 3998 case GUEST_ES_LIMIT: 3999 case GUEST_CS_LIMIT: 4000 case GUEST_SS_LIMIT: 4001 case GUEST_DS_LIMIT: 4002 case GUEST_FS_LIMIT: 4003 case GUEST_GS_LIMIT: 4004 case GUEST_LDTR_LIMIT: 4005 case GUEST_TR_LIMIT: 4006 case GUEST_GDTR_LIMIT: 4007 case GUEST_IDTR_LIMIT: 4008 case GUEST_ES_AR_BYTES: 4009 case GUEST_DS_AR_BYTES: 4010 case GUEST_FS_AR_BYTES: 4011 case GUEST_GS_AR_BYTES: 4012 case GUEST_LDTR_AR_BYTES: 4013 case GUEST_TR_AR_BYTES: 4014 case GUEST_ES_BASE: 4015 case GUEST_CS_BASE: 4016 case GUEST_SS_BASE: 4017 case GUEST_DS_BASE: 4018 case GUEST_FS_BASE: 4019 case GUEST_GS_BASE: 4020 case GUEST_LDTR_BASE: 4021 case GUEST_TR_BASE: 4022 case GUEST_GDTR_BASE: 4023 case GUEST_IDTR_BASE: 4024 case GUEST_PENDING_DBG_EXCEPTIONS: 4025 case GUEST_BNDCFGS: 4026 return true; 4027 default: 4028 break; 4029 } 4030 4031 return 
false; 4032 } 4033 4034 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 4035 struct vmcs12 *vmcs12) 4036 { 4037 struct vcpu_vmx *vmx = to_vmx(vcpu); 4038 4039 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); 4040 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); 4041 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); 4042 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); 4043 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); 4044 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); 4045 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); 4046 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); 4047 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); 4048 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); 4049 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); 4050 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); 4051 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); 4052 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); 4053 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); 4054 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); 4055 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); 4056 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); 4057 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); 4058 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); 4059 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); 4060 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); 4061 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); 4062 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); 4063 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); 4064 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); 4065 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); 4066 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); 4067 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); 4068 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); 4069 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); 4070 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); 4071 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); 4072 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); 4073 vmcs12->guest_pending_dbg_exceptions = 4074 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); 4075 if (kvm_mpx_supported()) 4076 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 4077 4078 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; 4079 } 4080 4081 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 4082 struct vmcs12 *vmcs12) 4083 { 4084 struct vcpu_vmx *vmx = to_vmx(vcpu); 4085 int cpu; 4086 4087 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) 4088 return; 4089 4090 4091 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); 4092 4093 cpu = get_cpu(); 4094 vmx->loaded_vmcs = &vmx->nested.vmcs02; 4095 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01); 4096 4097 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4098 4099 vmx->loaded_vmcs = &vmx->vmcs01; 4100 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); 4101 put_cpu(); 4102 } 4103 4104 /* 4105 * Update the guest state fields of vmcs12 to reflect changes that 4106 * occurred while L2 was running. (The "IA-32e mode guest" bit of the 4107 * VM-entry controls is also updated, since this is really a guest 4108 * state bit.) 
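 *
 * Rarely read guest fields (segment selectors, limits, bases, etc.) are
 * handled by sync_vmcs02_to_vmcs12_rare(), either right away for an
 * enlightened VMCS or lazily via need_sync_vmcs02_to_vmcs12_rare.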
4109 */ 4110 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 4111 { 4112 struct vcpu_vmx *vmx = to_vmx(vcpu); 4113 4114 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 4115 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4116 4117 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = 4118 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr); 4119 4120 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 4121 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); 4122 4123 vmcs12->guest_rsp = kvm_rsp_read(vcpu); 4124 vmcs12->guest_rip = kvm_rip_read(vcpu); 4125 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); 4126 4127 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); 4128 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); 4129 4130 vmcs12->guest_interruptibility_info = 4131 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 4132 4133 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 4134 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; 4135 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 4136 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI; 4137 else 4138 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; 4139 4140 if (nested_cpu_has_preemption_timer(vmcs12) && 4141 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER && 4142 !vmx->nested.nested_run_pending) 4143 vmcs12->vmx_preemption_timer_value = 4144 vmx_get_preemption_timer_value(vcpu); 4145 4146 /* 4147 * In some cases (usually, nested EPT), L2 is allowed to change its 4148 * own CR3 without exiting. If it has changed it, we must keep it. 4149 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined 4150 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. 4151 * 4152 * Additionally, restore L2's PDPTR to vmcs12. 4153 */ 4154 if (enable_ept) { 4155 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); 4156 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 4157 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); 4158 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); 4159 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); 4160 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); 4161 } 4162 } 4163 4164 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); 4165 4166 if (nested_cpu_has_vid(vmcs12)) 4167 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); 4168 4169 vmcs12->vm_entry_controls = 4170 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | 4171 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); 4172 4173 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) 4174 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); 4175 4176 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) 4177 vmcs12->guest_ia32_efer = vcpu->arch.efer; 4178 } 4179 4180 /* 4181 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits 4182 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), 4183 * and this function updates it to reflect the changes to the guest state while 4184 * L2 was running (and perhaps made some exits which were handled directly by L0 4185 * without going back to L1), and to reflect the exit reason. 4186 * Note that we do not have to copy here all VMCS fields, just those that 4187 * could have changed by the L2 guest or the exit - i.e., the guest-state and 4188 * exit-information fields only. Other fields are modified by L1 with VMWRITE, 4189 * which already writes to vmcs12 directly. 
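 *
 * The guest-state fields themselves are copied by
 * sync_vmcs02_to_vmcs12(); this function fills in the exit-information
 * fields and the IDT-vectoring info for the exit being delivered.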
4190 */ 4191 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 4192 u32 vm_exit_reason, u32 exit_intr_info, 4193 unsigned long exit_qualification) 4194 { 4195 /* update exit information fields: */ 4196 vmcs12->vm_exit_reason = vm_exit_reason; 4197 if (to_vmx(vcpu)->exit_reason.enclave_mode) 4198 vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE; 4199 vmcs12->exit_qualification = exit_qualification; 4200 vmcs12->vm_exit_intr_info = exit_intr_info; 4201 4202 vmcs12->idt_vectoring_info_field = 0; 4203 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4204 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4205 4206 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { 4207 vmcs12->launch_state = 1; 4208 4209 /* vm_entry_intr_info_field is cleared on exit. Emulate this 4210 * instead of reading the real value. */ 4211 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; 4212 4213 /* 4214 * Transfer the event that L0 or L1 may have wanted to inject into 4215 * L2 to IDT_VECTORING_INFO_FIELD. 4216 */ 4217 vmcs12_save_pending_event(vcpu, vmcs12); 4218 4219 /* 4220 * According to spec, there's no need to store the guest's 4221 * MSRs if the exit is due to a VM-entry failure that occurs 4222 * during or after loading the guest state. Since this exit 4223 * does not fall in that category, we need to save the MSRs. 4224 */ 4225 if (nested_vmx_store_msr(vcpu, 4226 vmcs12->vm_exit_msr_store_addr, 4227 vmcs12->vm_exit_msr_store_count)) 4228 nested_vmx_abort(vcpu, 4229 VMX_ABORT_SAVE_GUEST_MSR_FAIL); 4230 } 4231 4232 /* 4233 * Drop what we picked up for L2 via vmx_complete_interrupts. It is 4234 * preserved above and would only end up incorrectly in L1. 4235 */ 4236 vcpu->arch.nmi_injected = false; 4237 kvm_clear_exception_queue(vcpu); 4238 kvm_clear_interrupt_queue(vcpu); 4239 } 4240 4241 /* 4242 * A part of what we need to do when the nested L2 guest exits and we want to 4243 * run its L1 parent is to reset L1's guest state to the host state specified 4244 * in vmcs12. 4245 * This function is to be called not only on normal nested exit, but also on 4246 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry 4247 * Failures During or After Loading Guest State"). 4248 * This function should be called when the active VMCS is L1's (vmcs01). 4249 */ 4250 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 4251 struct vmcs12 *vmcs12) 4252 { 4253 enum vm_entry_failure_code ignored; 4254 struct kvm_segment seg; 4255 4256 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) 4257 vcpu->arch.efer = vmcs12->host_ia32_efer; 4258 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4259 vcpu->arch.efer |= (EFER_LMA | EFER_LME); 4260 else 4261 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); 4262 vmx_set_efer(vcpu, vcpu->arch.efer); 4263 4264 kvm_rsp_write(vcpu, vmcs12->host_rsp); 4265 kvm_rip_write(vcpu, vmcs12->host_rip); 4266 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); 4267 vmx_set_interrupt_shadow(vcpu, 0); 4268 4269 /* 4270 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't 4271 * actually changed, because vmx_set_cr0 refers to the EFER value set above. 4272 * 4273 * CR0_GUEST_HOST_MASK is already set in the original vmcs01 4274 * (KVM doesn't change it). 4275 */ 4276 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4277 vmx_set_cr0(vcpu, vmcs12->host_cr0); 4278 4279 /* Same as above - no reason to call set_cr4_guest_host_mask().
*/ 4280 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4281 vmx_set_cr4(vcpu, vmcs12->host_cr4); 4282 4283 nested_ept_uninit_mmu_context(vcpu); 4284 4285 /* 4286 * Only PDPTE load can fail as the value of cr3 was checked on entry and 4287 * couldn't have changed. 4288 */ 4289 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored)) 4290 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); 4291 4292 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false); 4293 4294 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); 4295 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); 4296 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); 4297 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); 4298 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); 4299 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); 4300 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); 4301 4302 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ 4303 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) 4304 vmcs_write64(GUEST_BNDCFGS, 0); 4305 4306 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { 4307 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); 4308 vcpu->arch.pat = vmcs12->host_ia32_pat; 4309 } 4310 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 4311 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 4312 vmcs12->host_ia32_perf_global_ctrl)); 4313 4314 /* Set L1 segment info according to Intel SDM 4315 27.5.2 Loading Host Segment and Descriptor-Table Registers */ 4316 seg = (struct kvm_segment) { 4317 .base = 0, 4318 .limit = 0xFFFFFFFF, 4319 .selector = vmcs12->host_cs_selector, 4320 .type = 11, 4321 .present = 1, 4322 .s = 1, 4323 .g = 1 4324 }; 4325 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4326 seg.l = 1; 4327 else 4328 seg.db = 1; 4329 __vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); 4330 seg = (struct kvm_segment) { 4331 .base = 0, 4332 .limit = 0xFFFFFFFF, 4333 .type = 3, 4334 .present = 1, 4335 .s = 1, 4336 .db = 1, 4337 .g = 1 4338 }; 4339 seg.selector = vmcs12->host_ds_selector; 4340 __vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); 4341 seg.selector = vmcs12->host_es_selector; 4342 __vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); 4343 seg.selector = vmcs12->host_ss_selector; 4344 __vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); 4345 seg.selector = vmcs12->host_fs_selector; 4346 seg.base = vmcs12->host_fs_base; 4347 __vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); 4348 seg.selector = vmcs12->host_gs_selector; 4349 seg.base = vmcs12->host_gs_base; 4350 __vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); 4351 seg = (struct kvm_segment) { 4352 .base = vmcs12->host_tr_base, 4353 .limit = 0x67, 4354 .selector = vmcs12->host_tr_selector, 4355 .type = 11, 4356 .present = 1 4357 }; 4358 __vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); 4359 4360 memset(&seg, 0, sizeof(seg)); 4361 seg.unusable = 1; 4362 __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR); 4363 4364 kvm_set_dr(vcpu, 7, 0x400); 4365 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4366 4367 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 4368 vmcs12->vm_exit_msr_load_count)) 4369 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4370 4371 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu); 4372 } 4373 4374 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) 4375 { 4376 struct vmx_uret_msr *efer_msr; 4377 unsigned int i; 4378 4379 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) 4380 return 
vmcs_read64(GUEST_IA32_EFER); 4381 4382 if (cpu_has_load_ia32_efer()) 4383 return host_efer; 4384 4385 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { 4386 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) 4387 return vmx->msr_autoload.guest.val[i].value; 4388 } 4389 4390 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER); 4391 if (efer_msr) 4392 return efer_msr->data; 4393 4394 return host_efer; 4395 } 4396 4397 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) 4398 { 4399 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4400 struct vcpu_vmx *vmx = to_vmx(vcpu); 4401 struct vmx_msr_entry g, h; 4402 gpa_t gpa; 4403 u32 i, j; 4404 4405 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); 4406 4407 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 4408 /* 4409 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set 4410 * as vmcs01.GUEST_DR7 contains a userspace defined value 4411 * and vcpu->arch.dr7 is not squirreled away before the 4412 * nested VMENTER (not worth adding a variable in nested_vmx). 4413 */ 4414 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 4415 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 4416 else 4417 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); 4418 } 4419 4420 /* 4421 * Note that calling vmx_set_{efer,cr0,cr4} is important as they 4422 * handle a variety of side effects to KVM's software model. 4423 */ 4424 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); 4425 4426 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4427 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); 4428 4429 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4430 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); 4431 4432 nested_ept_uninit_mmu_context(vcpu); 4433 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 4434 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 4435 4436 /* 4437 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs 4438 * from vmcs01 (if necessary). The PDPTRs are not loaded on 4439 * VMFail, like everything else we just need to ensure our 4440 * software model is up-to-date. 4441 */ 4442 if (enable_ept && is_pae_paging(vcpu)) 4443 ept_save_pdptrs(vcpu); 4444 4445 kvm_mmu_reset_context(vcpu); 4446 4447 /* 4448 * This nasty bit of open coding is a compromise between blindly 4449 * loading L1's MSRs using the exit load lists (incorrect emulation 4450 * of VMFail), leaving the nested VM's MSRs in the software model 4451 * (incorrect behavior) and snapshotting the modified MSRs (too 4452 * expensive since the lists are unbound by hardware). For each 4453 * MSR that was (prematurely) loaded from the nested VMEntry load 4454 * list, reload it from the exit load list if it exists and differs 4455 * from the guest value. The intent is to stuff host state as 4456 * silently as possible, not to fully process the exit load list. 
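 *
 * Illustrative example: if the VM-entry load list set MSR_EFER to the
 * L2 value and the VM-exit load list also contains MSR_EFER with a
 * different value, only that single WRMSR is redone; an MSR present in
 * the entry list but absent from the exit list is left untouched.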
4457 */ 4458 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { 4459 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); 4460 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { 4461 pr_debug_ratelimited( 4462 "%s read MSR index failed (%u, 0x%08llx)\n", 4463 __func__, i, gpa); 4464 goto vmabort; 4465 } 4466 4467 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { 4468 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); 4469 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { 4470 pr_debug_ratelimited( 4471 "%s read MSR failed (%u, 0x%08llx)\n", 4472 __func__, j, gpa); 4473 goto vmabort; 4474 } 4475 if (h.index != g.index) 4476 continue; 4477 if (h.value == g.value) 4478 break; 4479 4480 if (nested_vmx_load_msr_check(vcpu, &h)) { 4481 pr_debug_ratelimited( 4482 "%s check failed (%u, 0x%x, 0x%x)\n", 4483 __func__, j, h.index, h.reserved); 4484 goto vmabort; 4485 } 4486 4487 if (kvm_set_msr(vcpu, h.index, h.value)) { 4488 pr_debug_ratelimited( 4489 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", 4490 __func__, j, h.index, h.value); 4491 goto vmabort; 4492 } 4493 } 4494 } 4495 4496 return; 4497 4498 vmabort: 4499 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4500 } 4501 4502 /* 4503 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 4504 * and modify vmcs12 to make it see what it would expect to see there if 4505 * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) 4506 */ 4507 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, 4508 u32 exit_intr_info, unsigned long exit_qualification) 4509 { 4510 struct vcpu_vmx *vmx = to_vmx(vcpu); 4511 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4512 4513 /* trying to cancel vmlaunch/vmresume is a bug */ 4514 WARN_ON_ONCE(vmx->nested.nested_run_pending); 4515 4516 /* Similarly, triple faults in L2 should never escape. */ 4517 WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)); 4518 4519 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { 4520 /* 4521 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map 4522 * Enlightened VMCS after migration and we still need to 4523 * do that when something is forcing L2->L1 exit prior to 4524 * the first L2 run. 4525 */ 4526 (void)nested_get_evmcs_page(vcpu); 4527 } 4528 4529 /* Service pending TLB flush requests for L2 before switching to L1. */ 4530 kvm_service_local_tlb_flush_requests(vcpu); 4531 4532 /* 4533 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between 4534 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are 4535 * up-to-date before switching to L1. 4536 */ 4537 if (enable_ept && is_pae_paging(vcpu)) 4538 vmx_ept_load_pdptrs(vcpu); 4539 4540 leave_guest_mode(vcpu); 4541 4542 if (nested_cpu_has_preemption_timer(vmcs12)) 4543 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); 4544 4545 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) { 4546 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset; 4547 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING)) 4548 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; 4549 } 4550 4551 if (likely(!vmx->fail)) { 4552 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 4553 4554 if (vm_exit_reason != -1) 4555 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason, 4556 exit_intr_info, exit_qualification); 4557 4558 /* 4559 * Must happen outside of sync_vmcs02_to_vmcs12() as it will 4560 * also be used to capture vmcs12 cache as part of 4561 * capturing nVMX state for snapshot (migration). 
4562 * 4563 * Otherwise, this flush will dirty guest memory at a 4564 * point it is already assumed by user-space to be 4565 * immutable. 4566 */ 4567 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); 4568 } else { 4569 /* 4570 * The only expected VM-instruction error is "VM entry with 4571 * invalid control field(s)." Anything else indicates a 4572 * problem with L0. And we should never get here with a 4573 * VMFail of any type if early consistency checks are enabled. 4574 */ 4575 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 4576 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4577 WARN_ON_ONCE(nested_early_check); 4578 } 4579 4580 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 4581 4582 /* Update any VMCS fields that might have changed while L2 ran */ 4583 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 4584 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 4585 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 4586 if (kvm_has_tsc_control) 4587 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); 4588 4589 if (vmx->nested.l1_tpr_threshold != -1) 4590 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); 4591 4592 if (vmx->nested.change_vmcs01_virtual_apic_mode) { 4593 vmx->nested.change_vmcs01_virtual_apic_mode = false; 4594 vmx_set_virtual_apic_mode(vcpu); 4595 } 4596 4597 if (vmx->nested.update_vmcs01_cpu_dirty_logging) { 4598 vmx->nested.update_vmcs01_cpu_dirty_logging = false; 4599 vmx_update_cpu_dirty_logging(vcpu); 4600 } 4601 4602 /* Unpin physical memory we referred to in vmcs02 */ 4603 if (vmx->nested.apic_access_page) { 4604 kvm_release_page_clean(vmx->nested.apic_access_page); 4605 vmx->nested.apic_access_page = NULL; 4606 } 4607 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); 4608 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); 4609 vmx->nested.pi_desc = NULL; 4610 4611 if (vmx->nested.reload_vmcs01_apic_access_page) { 4612 vmx->nested.reload_vmcs01_apic_access_page = false; 4613 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 4614 } 4615 4616 if ((vm_exit_reason != -1) && 4617 (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))) 4618 vmx->nested.need_vmcs12_to_shadow_sync = true; 4619 4620 /* in case we halted in L2 */ 4621 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 4622 4623 if (likely(!vmx->fail)) { 4624 if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && 4625 nested_exit_intr_ack_set(vcpu)) { 4626 int irq = kvm_cpu_get_interrupt(vcpu); 4627 WARN_ON(irq < 0); 4628 vmcs12->vm_exit_intr_info = irq | 4629 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; 4630 } 4631 4632 if (vm_exit_reason != -1) 4633 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, 4634 vmcs12->exit_qualification, 4635 vmcs12->idt_vectoring_info_field, 4636 vmcs12->vm_exit_intr_info, 4637 vmcs12->vm_exit_intr_error_code, 4638 KVM_ISA_VMX); 4639 4640 load_vmcs12_host_state(vcpu, vmcs12); 4641 4642 return; 4643 } 4644 4645 /* 4646 * After an early L2 VM-entry failure, we're now back 4647 * in L1 which thinks it just finished a VMLAUNCH or 4648 * VMRESUME instruction, so we need to set the failure 4649 * flag and the VM-instruction error field of the VMCS 4650 * accordingly, and skip the emulated instruction. 4651 */ 4652 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4653 4654 /* 4655 * Restore L1's host state to KVM's software model. 
We're here 4656 * because a consistency check was caught by hardware, which 4657 * means some amount of guest state has been propagated to KVM's 4658 * model and needs to be unwound to the host's state. 4659 */ 4660 nested_vmx_restore_host_state(vcpu); 4661 4662 vmx->fail = 0; 4663 } 4664 4665 static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu) 4666 { 4667 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); 4668 } 4669 4670 /* 4671 * Decode the memory-address operand of a vmx instruction, as recorded on an 4672 * exit caused by such an instruction (run by a guest hypervisor). 4673 * On success, returns 0. When the operand is invalid, returns 1 and throws 4674 * #UD, #GP, or #SS. 4675 */ 4676 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, 4677 u32 vmx_instruction_info, bool wr, int len, gva_t *ret) 4678 { 4679 gva_t off; 4680 bool exn; 4681 struct kvm_segment s; 4682 4683 /* 4684 * According to Vol. 3B, "Information for VM Exits Due to Instruction 4685 * Execution", on an exit, vmx_instruction_info holds most of the 4686 * addressing components of the operand. Only the displacement part 4687 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). 4688 * For how an actual address is calculated from all these components, 4689 * refer to Vol. 1, "Operand Addressing". 4690 */ 4691 int scaling = vmx_instruction_info & 3; 4692 int addr_size = (vmx_instruction_info >> 7) & 7; 4693 bool is_reg = vmx_instruction_info & (1u << 10); 4694 int seg_reg = (vmx_instruction_info >> 15) & 7; 4695 int index_reg = (vmx_instruction_info >> 18) & 0xf; 4696 bool index_is_valid = !(vmx_instruction_info & (1u << 22)); 4697 int base_reg = (vmx_instruction_info >> 23) & 0xf; 4698 bool base_is_valid = !(vmx_instruction_info & (1u << 27)); 4699 4700 if (is_reg) { 4701 kvm_queue_exception(vcpu, UD_VECTOR); 4702 return 1; 4703 } 4704 4705 /* Addr = segment_base + offset */ 4706 /* offset = base + [index * scale] + displacement */ 4707 off = exit_qualification; /* holds the displacement */ 4708 if (addr_size == 1) 4709 off = (gva_t)sign_extend64(off, 31); 4710 else if (addr_size == 0) 4711 off = (gva_t)sign_extend64(off, 15); 4712 if (base_is_valid) 4713 off += kvm_register_read(vcpu, base_reg); 4714 if (index_is_valid) 4715 off += kvm_register_read(vcpu, index_reg) << scaling; 4716 vmx_get_segment(vcpu, &s, seg_reg); 4717 4718 /* 4719 * The effective address, i.e. @off, of a memory operand is truncated 4720 * based on the address size of the instruction. Note that this is 4721 * the *effective address*, i.e. the address prior to accounting for 4722 * the segment's base. 4723 */ 4724 if (addr_size == 1) /* 32 bit */ 4725 off &= 0xffffffff; 4726 else if (addr_size == 0) /* 16 bit */ 4727 off &= 0xffff; 4728 4729 /* Checks for #GP/#SS exceptions. */ 4730 exn = false; 4731 if (is_long_mode(vcpu)) { 4732 /* 4733 * The virtual/linear address is never truncated in 64-bit 4734 * mode, e.g. a 32-bit address size can yield a 64-bit virtual 4735 * address when using FS/GS with a non-zero base. 4736 */ 4737 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS) 4738 *ret = s.base + off; 4739 else 4740 *ret = off; 4741 4742 /* Long mode: #GP(0)/#SS(0) if the memory address is in a 4743 * non-canonical form. This is the only check on the memory 4744 * destination for long mode! 
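 *
 * (Illustrative: for "vmptrld 0x10(%rax,%rbx,4)" with a 64-bit address
 * size, off = 0x10 + RAX + (RBX << 2), and since the default segment
 * is DS, *ret == off; only FS/GS bases are folded in above. The type,
 * usability and limit checks below apply to protected mode only.)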
4745 */ 4746 exn = is_noncanonical_address(*ret, vcpu); 4747 } else { 4748 /* 4749 * When not in long mode, the virtual/linear address is 4750 * unconditionally truncated to 32 bits regardless of the 4751 * address size. 4752 */ 4753 *ret = (s.base + off) & 0xffffffff; 4754 4755 /* Protected mode: apply checks for segment validity in the 4756 * following order: 4757 * - segment type check (#GP(0) may be thrown) 4758 * - usability check (#GP(0)/#SS(0)) 4759 * - limit check (#GP(0)/#SS(0)) 4760 */ 4761 if (wr) 4762 /* #GP(0) if the destination operand is located in a 4763 * read-only data segment or any code segment. 4764 */ 4765 exn = ((s.type & 0xa) == 0 || (s.type & 8)); 4766 else 4767 /* #GP(0) if the source operand is located in an 4768 * execute-only code segment 4769 */ 4770 exn = ((s.type & 0xa) == 8); 4771 if (exn) { 4772 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 4773 return 1; 4774 } 4775 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 4776 */ 4777 exn = (s.unusable != 0); 4778 4779 /* 4780 * Protected mode: #GP(0)/#SS(0) if the memory operand is 4781 * outside the segment limit. All CPUs that support VMX ignore 4782 * limit checks for flat segments, i.e. segments with base==0, 4783 * limit==0xffffffff and of type expand-up data or code. 4784 */ 4785 if (!(s.base == 0 && s.limit == 0xffffffff && 4786 ((s.type & 8) || !(s.type & 4)))) 4787 exn = exn || ((u64)off + len - 1 > s.limit); 4788 } 4789 if (exn) { 4790 kvm_queue_exception_e(vcpu, 4791 seg_reg == VCPU_SREG_SS ? 4792 SS_VECTOR : GP_VECTOR, 4793 0); 4794 return 1; 4795 } 4796 4797 return 0; 4798 } 4799 4800 void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu) 4801 { 4802 struct vcpu_vmx *vmx; 4803 4804 if (!nested_vmx_allowed(vcpu)) 4805 return; 4806 4807 vmx = to_vmx(vcpu); 4808 if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) { 4809 vmx->nested.msrs.entry_ctls_high |= 4810 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 4811 vmx->nested.msrs.exit_ctls_high |= 4812 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 4813 } else { 4814 vmx->nested.msrs.entry_ctls_high &= 4815 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 4816 vmx->nested.msrs.exit_ctls_high &= 4817 ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 4818 } 4819 } 4820 4821 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer, 4822 int *ret) 4823 { 4824 gva_t gva; 4825 struct x86_exception e; 4826 int r; 4827 4828 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 4829 vmcs_read32(VMX_INSTRUCTION_INFO), false, 4830 sizeof(*vmpointer), &gva)) { 4831 *ret = 1; 4832 return -EINVAL; 4833 } 4834 4835 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e); 4836 if (r != X86EMUL_CONTINUE) { 4837 *ret = kvm_handle_memory_failure(vcpu, r, &e); 4838 return -EINVAL; 4839 } 4840 4841 return 0; 4842 } 4843 4844 /* 4845 * Allocate a shadow VMCS and associate it with the currently loaded 4846 * VMCS, unless such a shadow VMCS already exists. The newly allocated 4847 * VMCS is also VMCLEARed, so that it is ready for use. 4848 */ 4849 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) 4850 { 4851 struct vcpu_vmx *vmx = to_vmx(vcpu); 4852 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; 4853 4854 /* 4855 * KVM allocates a shadow VMCS only when L1 executes VMXON and frees it 4856 * when L1 executes VMXOFF or the vCPU is forced out of nested 4857 * operation. VMXON faults if the CPU is already post-VMXON, so it 4858 * should be impossible to already have an allocated shadow VMCS. 
KVM 4859 * doesn't support virtualization of VMCS shadowing, so vmcs01 should 4860 * always be the loaded VMCS. 4861 */ 4862 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs)) 4863 return loaded_vmcs->shadow_vmcs; 4864 4865 loaded_vmcs->shadow_vmcs = alloc_vmcs(true); 4866 if (loaded_vmcs->shadow_vmcs) 4867 vmcs_clear(loaded_vmcs->shadow_vmcs); 4868 4869 return loaded_vmcs->shadow_vmcs; 4870 } 4871 4872 static int enter_vmx_operation(struct kvm_vcpu *vcpu) 4873 { 4874 struct vcpu_vmx *vmx = to_vmx(vcpu); 4875 int r; 4876 4877 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); 4878 if (r < 0) 4879 goto out_vmcs02; 4880 4881 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4882 if (!vmx->nested.cached_vmcs12) 4883 goto out_cached_vmcs12; 4884 4885 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA; 4886 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4887 if (!vmx->nested.cached_shadow_vmcs12) 4888 goto out_cached_shadow_vmcs12; 4889 4890 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) 4891 goto out_shadow_vmcs; 4892 4893 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, 4894 HRTIMER_MODE_ABS_PINNED); 4895 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 4896 4897 vmx->nested.vpid02 = allocate_vpid(); 4898 4899 vmx->nested.vmcs02_initialized = false; 4900 vmx->nested.vmxon = true; 4901 4902 if (vmx_pt_mode_is_host_guest()) { 4903 vmx->pt_desc.guest.ctl = 0; 4904 pt_update_intercept_for_msr(vcpu); 4905 } 4906 4907 return 0; 4908 4909 out_shadow_vmcs: 4910 kfree(vmx->nested.cached_shadow_vmcs12); 4911 4912 out_cached_shadow_vmcs12: 4913 kfree(vmx->nested.cached_vmcs12); 4914 4915 out_cached_vmcs12: 4916 free_loaded_vmcs(&vmx->nested.vmcs02); 4917 4918 out_vmcs02: 4919 return -ENOMEM; 4920 } 4921 4922 /* Emulate the VMXON instruction. */ 4923 static int handle_vmon(struct kvm_vcpu *vcpu) 4924 { 4925 int ret; 4926 gpa_t vmptr; 4927 uint32_t revision; 4928 struct vcpu_vmx *vmx = to_vmx(vcpu); 4929 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED 4930 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; 4931 4932 /* 4933 * The Intel VMX Instruction Reference lists a bunch of bits that are 4934 * prerequisite to running VMXON, most notably cr4.VMXE must be set to 4935 * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this). 4936 * Otherwise, we should fail with #UD. But most faulting conditions 4937 * have already been checked by hardware, prior to the VM-exit for 4938 * VMXON. We do test guest cr4.VMXE because processor CR4 always has 4939 * that bit set to 1 in non-root mode. 4940 */ 4941 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { 4942 kvm_queue_exception(vcpu, UD_VECTOR); 4943 return 1; 4944 } 4945 4946 /* CPL=0 must be checked manually. 
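 * The SDM applies the CPL > 0 #GP(0) check to VMXON only when the CPU
 * is outside VMX operation; with L1 running as a guest the CPU is
 * already in VMX non-root operation, so hardware takes the VM-exit
 * path without checking CPL and the check must be repeated here.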
*/ 4947 if (vmx_get_cpl(vcpu)) { 4948 kvm_inject_gp(vcpu, 0); 4949 return 1; 4950 } 4951 4952 if (vmx->nested.vmxon) 4953 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); 4954 4955 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) 4956 != VMXON_NEEDED_FEATURES) { 4957 kvm_inject_gp(vcpu, 0); 4958 return 1; 4959 } 4960 4961 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret)) 4962 return ret; 4963 4964 /* 4965 * SDM 3: 24.11.5 4966 * The first 4 bytes of VMXON region contain the supported 4967 * VMCS revision identifier 4968 * 4969 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; 4970 * which replaces physical address width with 32 4971 */ 4972 if (!page_address_valid(vcpu, vmptr)) 4973 return nested_vmx_failInvalid(vcpu); 4974 4975 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) || 4976 revision != VMCS12_REVISION) 4977 return nested_vmx_failInvalid(vcpu); 4978 4979 vmx->nested.vmxon_ptr = vmptr; 4980 ret = enter_vmx_operation(vcpu); 4981 if (ret) 4982 return ret; 4983 4984 return nested_vmx_succeed(vcpu); 4985 } 4986 4987 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) 4988 { 4989 struct vcpu_vmx *vmx = to_vmx(vcpu); 4990 4991 if (vmx->nested.current_vmptr == INVALID_GPA) 4992 return; 4993 4994 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 4995 4996 if (enable_shadow_vmcs) { 4997 /* copy to memory all shadowed fields in case 4998 they were modified */ 4999 copy_shadow_to_vmcs12(vmx); 5000 vmx_disable_shadow_vmcs(vmx); 5001 } 5002 vmx->nested.posted_intr_nv = -1; 5003 5004 /* Flush VMCS12 to guest memory */ 5005 kvm_vcpu_write_guest_page(vcpu, 5006 vmx->nested.current_vmptr >> PAGE_SHIFT, 5007 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); 5008 5009 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 5010 5011 vmx->nested.current_vmptr = INVALID_GPA; 5012 } 5013 5014 /* Emulate the VMXOFF instruction */ 5015 static int handle_vmoff(struct kvm_vcpu *vcpu) 5016 { 5017 if (!nested_vmx_check_permission(vcpu)) 5018 return 1; 5019 5020 free_nested(vcpu); 5021 5022 /* Process a latched INIT during time CPU was in VMX operation */ 5023 kvm_make_request(KVM_REQ_EVENT, vcpu); 5024 5025 return nested_vmx_succeed(vcpu); 5026 } 5027 5028 /* Emulate the VMCLEAR instruction */ 5029 static int handle_vmclear(struct kvm_vcpu *vcpu) 5030 { 5031 struct vcpu_vmx *vmx = to_vmx(vcpu); 5032 u32 zero = 0; 5033 gpa_t vmptr; 5034 u64 evmcs_gpa; 5035 int r; 5036 5037 if (!nested_vmx_check_permission(vcpu)) 5038 return 1; 5039 5040 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5041 return r; 5042 5043 if (!page_address_valid(vcpu, vmptr)) 5044 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); 5045 5046 if (vmptr == vmx->nested.vmxon_ptr) 5047 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); 5048 5049 /* 5050 * When Enlightened VMEntry is enabled on the calling CPU we treat 5051 * memory area pointer by vmptr as Enlightened VMCS (as there's no good 5052 * way to distinguish it from VMCS12) and we must not corrupt it by 5053 * writing to the non-existent 'launch_state' field. The area doesn't 5054 * have to be the currently active EVMCS on the calling CPU and there's 5055 * nothing KVM has to do to transition it from 'active' to 'non-active' 5056 * state. It is possible that the area will stay mapped as 5057 * vmx->nested.hv_evmcs but this shouldn't be a problem. 
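 *
 * Hence the handling below: for an ordinary vmptr the launch_state
 * field is zeroed in guest memory (releasing the VMCS first if it is
 * the current one), whereas a VMCLEAR of the current enlightened VMCS
 * pointer simply unmaps it via nested_release_evmcs().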
5058 */ 5059 if (likely(!vmx->nested.enlightened_vmcs_enabled || 5060 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) { 5061 if (vmptr == vmx->nested.current_vmptr) 5062 nested_release_vmcs12(vcpu); 5063 5064 kvm_vcpu_write_guest(vcpu, 5065 vmptr + offsetof(struct vmcs12, 5066 launch_state), 5067 &zero, sizeof(zero)); 5068 } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) { 5069 nested_release_evmcs(vcpu); 5070 } 5071 5072 return nested_vmx_succeed(vcpu); 5073 } 5074 5075 /* Emulate the VMLAUNCH instruction */ 5076 static int handle_vmlaunch(struct kvm_vcpu *vcpu) 5077 { 5078 return nested_vmx_run(vcpu, true); 5079 } 5080 5081 /* Emulate the VMRESUME instruction */ 5082 static int handle_vmresume(struct kvm_vcpu *vcpu) 5083 { 5084 5085 return nested_vmx_run(vcpu, false); 5086 } 5087 5088 static int handle_vmread(struct kvm_vcpu *vcpu) 5089 { 5090 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 5091 : get_vmcs12(vcpu); 5092 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5093 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5094 struct vcpu_vmx *vmx = to_vmx(vcpu); 5095 struct x86_exception e; 5096 unsigned long field; 5097 u64 value; 5098 gva_t gva = 0; 5099 short offset; 5100 int len, r; 5101 5102 if (!nested_vmx_check_permission(vcpu)) 5103 return 1; 5104 5105 /* Decode instruction info and find the field to read */ 5106 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); 5107 5108 if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { 5109 /* 5110 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, 5111 * any VMREAD sets the ALU flags for VMfailInvalid. 5112 */ 5113 if (vmx->nested.current_vmptr == INVALID_GPA || 5114 (is_guest_mode(vcpu) && 5115 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) 5116 return nested_vmx_failInvalid(vcpu); 5117 5118 offset = get_vmcs12_field_offset(field); 5119 if (offset < 0) 5120 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5121 5122 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field)) 5123 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5124 5125 /* Read the field, zero-extended to a u64 value */ 5126 value = vmcs12_read_any(vmcs12, field, offset); 5127 } else { 5128 /* 5129 * Hyper-V TLFS (as of 6.0b) explicitly states, that while an 5130 * enlightened VMCS is active VMREAD/VMWRITE instructions are 5131 * unsupported. Unfortunately, certain versions of Windows 11 5132 * don't comply with this requirement which is not enforced in 5133 * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a 5134 * workaround, as misbehaving guests will panic on VM-Fail. 5135 * Note, enlightened VMCS is incompatible with shadow VMCS so 5136 * all VMREADs from L2 should go to L1. 5137 */ 5138 if (WARN_ON_ONCE(is_guest_mode(vcpu))) 5139 return nested_vmx_failInvalid(vcpu); 5140 5141 offset = evmcs_field_offset(field, NULL); 5142 if (offset < 0) 5143 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5144 5145 /* Read the field, zero-extended to a u64 value */ 5146 value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset); 5147 } 5148 5149 /* 5150 * Now copy part of this value to register or memory, as requested. 5151 * Note that the number of bits actually copied is 32 or 64 depending 5152 * on the guest's mode (32 or 64 bit), not on the given field's length. 5153 */ 5154 if (instr_info & BIT(10)) { 5155 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value); 5156 } else { 5157 len = is_64_bit_mode(vcpu) ? 
8 : 4; 5158 if (get_vmx_mem_address(vcpu, exit_qualification, 5159 instr_info, true, len, &gva)) 5160 return 1; 5161 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ 5162 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e); 5163 if (r != X86EMUL_CONTINUE) 5164 return kvm_handle_memory_failure(vcpu, r, &e); 5165 } 5166 5167 return nested_vmx_succeed(vcpu); 5168 } 5169 5170 static bool is_shadow_field_rw(unsigned long field) 5171 { 5172 switch (field) { 5173 #define SHADOW_FIELD_RW(x, y) case x: 5174 #include "vmcs_shadow_fields.h" 5175 return true; 5176 default: 5177 break; 5178 } 5179 return false; 5180 } 5181 5182 static bool is_shadow_field_ro(unsigned long field) 5183 { 5184 switch (field) { 5185 #define SHADOW_FIELD_RO(x, y) case x: 5186 #include "vmcs_shadow_fields.h" 5187 return true; 5188 default: 5189 break; 5190 } 5191 return false; 5192 } 5193 5194 static int handle_vmwrite(struct kvm_vcpu *vcpu) 5195 { 5196 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 5197 : get_vmcs12(vcpu); 5198 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5199 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5200 struct vcpu_vmx *vmx = to_vmx(vcpu); 5201 struct x86_exception e; 5202 unsigned long field; 5203 short offset; 5204 gva_t gva; 5205 int len, r; 5206 5207 /* 5208 * The value to write might be 32 or 64 bits, depending on L1's long 5209 * mode, and eventually we need to write that into a field of several 5210 * possible lengths. The code below first zero-extends the value to 64 5211 * bit (value), and then copies only the appropriate number of 5212 * bits into the vmcs12 field. 5213 */ 5214 u64 value = 0; 5215 5216 if (!nested_vmx_check_permission(vcpu)) 5217 return 1; 5218 5219 /* 5220 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, 5221 * any VMWRITE sets the ALU flags for VMfailInvalid. 5222 */ 5223 if (vmx->nested.current_vmptr == INVALID_GPA || 5224 (is_guest_mode(vcpu) && 5225 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) 5226 return nested_vmx_failInvalid(vcpu); 5227 5228 if (instr_info & BIT(10)) 5229 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf)); 5230 else { 5231 len = is_64_bit_mode(vcpu) ? 8 : 4; 5232 if (get_vmx_mem_address(vcpu, exit_qualification, 5233 instr_info, false, len, &gva)) 5234 return 1; 5235 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e); 5236 if (r != X86EMUL_CONTINUE) 5237 return kvm_handle_memory_failure(vcpu, r, &e); 5238 } 5239 5240 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); 5241 5242 offset = get_vmcs12_field_offset(field); 5243 if (offset < 0) 5244 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5245 5246 /* 5247 * If the vCPU supports "VMWRITE to any supported field in the 5248 * VMCS," then the "read-only" fields are actually read/write. 5249 */ 5250 if (vmcs_field_readonly(field) && 5251 !nested_cpu_has_vmwrite_any_field(vcpu)) 5252 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); 5253 5254 /* 5255 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties 5256 * vmcs12, else we may crush a field or consume a stale value. 5257 */ 5258 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) 5259 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5260 5261 /* 5262 * Some Intel CPUs intentionally drop the reserved bits of the AR byte 5263 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM 5264 * behavior regardless of the underlying hardware, e.g. 
if an AR_BYTE 5265 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD 5266 * from L1 will return a different value than VMREAD from L2 (L1 sees 5267 * the stripped down value, L2 sees the full value as stored by KVM). 5268 */ 5269 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES) 5270 value &= 0x1f0ff; 5271 5272 vmcs12_write_any(vmcs12, field, offset, value); 5273 5274 /* 5275 * Do not track vmcs12 dirty-state if in guest-mode as we actually 5276 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated 5277 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't 5278 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path. 5279 */ 5280 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) { 5281 /* 5282 * L1 can read these fields without exiting, ensure the 5283 * shadow VMCS is up-to-date. 5284 */ 5285 if (enable_shadow_vmcs && is_shadow_field_ro(field)) { 5286 preempt_disable(); 5287 vmcs_load(vmx->vmcs01.shadow_vmcs); 5288 5289 __vmcs_writel(field, value); 5290 5291 vmcs_clear(vmx->vmcs01.shadow_vmcs); 5292 vmcs_load(vmx->loaded_vmcs->vmcs); 5293 preempt_enable(); 5294 } 5295 vmx->nested.dirty_vmcs12 = true; 5296 } 5297 5298 return nested_vmx_succeed(vcpu); 5299 } 5300 5301 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) 5302 { 5303 vmx->nested.current_vmptr = vmptr; 5304 if (enable_shadow_vmcs) { 5305 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 5306 vmcs_write64(VMCS_LINK_POINTER, 5307 __pa(vmx->vmcs01.shadow_vmcs)); 5308 vmx->nested.need_vmcs12_to_shadow_sync = true; 5309 } 5310 vmx->nested.dirty_vmcs12 = true; 5311 vmx->nested.force_msr_bitmap_recalc = true; 5312 } 5313 5314 /* Emulate the VMPTRLD instruction */ 5315 static int handle_vmptrld(struct kvm_vcpu *vcpu) 5316 { 5317 struct vcpu_vmx *vmx = to_vmx(vcpu); 5318 gpa_t vmptr; 5319 int r; 5320 5321 if (!nested_vmx_check_permission(vcpu)) 5322 return 1; 5323 5324 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5325 return r; 5326 5327 if (!page_address_valid(vcpu, vmptr)) 5328 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); 5329 5330 if (vmptr == vmx->nested.vmxon_ptr) 5331 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); 5332 5333 /* Forbid normal VMPTRLD if Enlightened version was used */ 5334 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 5335 return 1; 5336 5337 if (vmx->nested.current_vmptr != vmptr) { 5338 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache; 5339 struct vmcs_hdr hdr; 5340 5341 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) { 5342 /* 5343 * Reads from an unbacked page return all 1s, 5344 * which means that the 32 bits located at the 5345 * given physical address won't match the required 5346 * VMCS12_REVISION identifier. 5347 */ 5348 return nested_vmx_fail(vcpu, 5349 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5350 } 5351 5352 if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, 5353 offsetof(struct vmcs12, hdr), 5354 sizeof(hdr))) { 5355 return nested_vmx_fail(vcpu, 5356 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5357 } 5358 5359 if (hdr.revision_id != VMCS12_REVISION || 5360 (hdr.shadow_vmcs && 5361 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { 5362 return nested_vmx_fail(vcpu, 5363 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5364 } 5365 5366 nested_release_vmcs12(vcpu); 5367 5368 /* 5369 * Load VMCS12 from guest memory since it is not already 5370 * cached. 
5371 */ 5372 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12, 5373 VMCS12_SIZE)) { 5374 return nested_vmx_fail(vcpu, 5375 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5376 } 5377 5378 set_current_vmptr(vmx, vmptr); 5379 } 5380 5381 return nested_vmx_succeed(vcpu); 5382 } 5383 5384 /* Emulate the VMPTRST instruction */ 5385 static int handle_vmptrst(struct kvm_vcpu *vcpu) 5386 { 5387 unsigned long exit_qual = vmx_get_exit_qual(vcpu); 5388 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5389 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; 5390 struct x86_exception e; 5391 gva_t gva; 5392 int r; 5393 5394 if (!nested_vmx_check_permission(vcpu)) 5395 return 1; 5396 5397 if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr))) 5398 return 1; 5399 5400 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, 5401 true, sizeof(gpa_t), &gva)) 5402 return 1; 5403 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ 5404 r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr, 5405 sizeof(gpa_t), &e); 5406 if (r != X86EMUL_CONTINUE) 5407 return kvm_handle_memory_failure(vcpu, r, &e); 5408 5409 return nested_vmx_succeed(vcpu); 5410 } 5411 5412 /* Emulate the INVEPT instruction */ 5413 static int handle_invept(struct kvm_vcpu *vcpu) 5414 { 5415 struct vcpu_vmx *vmx = to_vmx(vcpu); 5416 u32 vmx_instruction_info, types; 5417 unsigned long type, roots_to_free; 5418 struct kvm_mmu *mmu; 5419 gva_t gva; 5420 struct x86_exception e; 5421 struct { 5422 u64 eptp, gpa; 5423 } operand; 5424 int i, r, gpr_index; 5425 5426 if (!(vmx->nested.msrs.secondary_ctls_high & 5427 SECONDARY_EXEC_ENABLE_EPT) || 5428 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { 5429 kvm_queue_exception(vcpu, UD_VECTOR); 5430 return 1; 5431 } 5432 5433 if (!nested_vmx_check_permission(vcpu)) 5434 return 1; 5435 5436 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5437 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info); 5438 type = kvm_register_read(vcpu, gpr_index); 5439 5440 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 5441 5442 if (type >= 32 || !(types & (1 << type))) 5443 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5444 5445 /* According to the Intel VMX instruction reference, the memory 5446 * operand is read even if it isn't needed (e.g., for type==global) 5447 */ 5448 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5449 vmx_instruction_info, false, sizeof(operand), &gva)) 5450 return 1; 5451 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5452 if (r != X86EMUL_CONTINUE) 5453 return kvm_handle_memory_failure(vcpu, r, &e); 5454 5455 /* 5456 * Nested EPT roots are always held through guest_mmu, 5457 * not root_mmu.
5458 */ 5459 mmu = &vcpu->arch.guest_mmu; 5460 5461 switch (type) { 5462 case VMX_EPT_EXTENT_CONTEXT: 5463 if (!nested_vmx_check_eptp(vcpu, operand.eptp)) 5464 return nested_vmx_fail(vcpu, 5465 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5466 5467 roots_to_free = 0; 5468 if (nested_ept_root_matches(mmu->root_hpa, mmu->root_pgd, 5469 operand.eptp)) 5470 roots_to_free |= KVM_MMU_ROOT_CURRENT; 5471 5472 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 5473 if (nested_ept_root_matches(mmu->prev_roots[i].hpa, 5474 mmu->prev_roots[i].pgd, 5475 operand.eptp)) 5476 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 5477 } 5478 break; 5479 case VMX_EPT_EXTENT_GLOBAL: 5480 roots_to_free = KVM_MMU_ROOTS_ALL; 5481 break; 5482 default: 5483 BUG(); 5484 break; 5485 } 5486 5487 if (roots_to_free) 5488 kvm_mmu_free_roots(vcpu, mmu, roots_to_free); 5489 5490 return nested_vmx_succeed(vcpu); 5491 } 5492 5493 static int handle_invvpid(struct kvm_vcpu *vcpu) 5494 { 5495 struct vcpu_vmx *vmx = to_vmx(vcpu); 5496 u32 vmx_instruction_info; 5497 unsigned long type, types; 5498 gva_t gva; 5499 struct x86_exception e; 5500 struct { 5501 u64 vpid; 5502 u64 gla; 5503 } operand; 5504 u16 vpid02; 5505 int r, gpr_index; 5506 5507 if (!(vmx->nested.msrs.secondary_ctls_high & 5508 SECONDARY_EXEC_ENABLE_VPID) || 5509 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { 5510 kvm_queue_exception(vcpu, UD_VECTOR); 5511 return 1; 5512 } 5513 5514 if (!nested_vmx_check_permission(vcpu)) 5515 return 1; 5516 5517 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5518 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info); 5519 type = kvm_register_read(vcpu, gpr_index); 5520 5521 types = (vmx->nested.msrs.vpid_caps & 5522 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; 5523 5524 if (type >= 32 || !(types & (1 << type))) 5525 return nested_vmx_fail(vcpu, 5526 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5527 5528 /* according to the intel vmx instruction reference, the memory 5529 * operand is read even if it isn't needed (e.g., for type==global) 5530 */ 5531 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5532 vmx_instruction_info, false, sizeof(operand), &gva)) 5533 return 1; 5534 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5535 if (r != X86EMUL_CONTINUE) 5536 return kvm_handle_memory_failure(vcpu, r, &e); 5537 5538 if (operand.vpid >> 16) 5539 return nested_vmx_fail(vcpu, 5540 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5541 5542 vpid02 = nested_get_vpid02(vcpu); 5543 switch (type) { 5544 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: 5545 if (!operand.vpid || 5546 is_noncanonical_address(operand.gla, vcpu)) 5547 return nested_vmx_fail(vcpu, 5548 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5549 vpid_sync_vcpu_addr(vpid02, operand.gla); 5550 break; 5551 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 5552 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: 5553 if (!operand.vpid) 5554 return nested_vmx_fail(vcpu, 5555 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5556 vpid_sync_context(vpid02); 5557 break; 5558 case VMX_VPID_EXTENT_ALL_CONTEXT: 5559 vpid_sync_context(vpid02); 5560 break; 5561 default: 5562 WARN_ON_ONCE(1); 5563 return kvm_skip_emulated_instruction(vcpu); 5564 } 5565 5566 /* 5567 * Sync the shadow page tables if EPT is disabled, L1 is invalidating 5568 * linear mappings for L2 (tagged with L2's VPID). Free all guest 5569 * roots as VPIDs are not tracked in the MMU role. 5570 * 5571 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share 5572 * an MMU when EPT is disabled. 
5573 * 5574 * TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR. 5575 */ 5576 if (!enable_ept) 5577 kvm_mmu_free_guest_mode_roots(vcpu, &vcpu->arch.root_mmu); 5578 5579 return nested_vmx_succeed(vcpu); 5580 } 5581 5582 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, 5583 struct vmcs12 *vmcs12) 5584 { 5585 u32 index = kvm_rcx_read(vcpu); 5586 u64 new_eptp; 5587 5588 if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12))) 5589 return 1; 5590 if (index >= VMFUNC_EPTP_ENTRIES) 5591 return 1; 5592 5593 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, 5594 &new_eptp, index * 8, 8)) 5595 return 1; 5596 5597 /* 5598 * If the (L2) guest does a vmfunc to the currently 5599 * active ept pointer, we don't have to do anything else 5600 */ 5601 if (vmcs12->ept_pointer != new_eptp) { 5602 if (!nested_vmx_check_eptp(vcpu, new_eptp)) 5603 return 1; 5604 5605 vmcs12->ept_pointer = new_eptp; 5606 nested_ept_new_eptp(vcpu); 5607 5608 if (!nested_cpu_has_vpid(vmcs12)) 5609 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 5610 } 5611 5612 return 0; 5613 } 5614 5615 static int handle_vmfunc(struct kvm_vcpu *vcpu) 5616 { 5617 struct vcpu_vmx *vmx = to_vmx(vcpu); 5618 struct vmcs12 *vmcs12; 5619 u32 function = kvm_rax_read(vcpu); 5620 5621 /* 5622 * VMFUNC is only supported for nested guests, but we always enable the 5623 * secondary control for simplicity; for non-nested mode, fake that we 5624 * didn't by injecting #UD. 5625 */ 5626 if (!is_guest_mode(vcpu)) { 5627 kvm_queue_exception(vcpu, UD_VECTOR); 5628 return 1; 5629 } 5630 5631 vmcs12 = get_vmcs12(vcpu); 5632 5633 /* 5634 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC 5635 * is enabled in vmcs02 if and only if it's enabled in vmcs12. 5636 */ 5637 if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) { 5638 kvm_queue_exception(vcpu, UD_VECTOR); 5639 return 1; 5640 } 5641 5642 if (!(vmcs12->vm_function_control & BIT_ULL(function))) 5643 goto fail; 5644 5645 switch (function) { 5646 case 0: 5647 if (nested_vmx_eptp_switching(vcpu, vmcs12)) 5648 goto fail; 5649 break; 5650 default: 5651 goto fail; 5652 } 5653 return kvm_skip_emulated_instruction(vcpu); 5654 5655 fail: 5656 /* 5657 * This is effectively a reflected VM-Exit, as opposed to a synthesized 5658 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode 5659 * EXIT_REASON_VMFUNC as the exit reason. 5660 */ 5661 nested_vmx_vmexit(vcpu, vmx->exit_reason.full, 5662 vmx_get_intr_info(vcpu), 5663 vmx_get_exit_qual(vcpu)); 5664 return 1; 5665 } 5666 5667 /* 5668 * Return true if an IO instruction with the specified port and size should cause 5669 * a VM-exit into L1.
5670 */ 5671 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, 5672 int size) 5673 { 5674 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5675 gpa_t bitmap, last_bitmap; 5676 u8 b; 5677 5678 last_bitmap = INVALID_GPA; 5679 b = -1; 5680 5681 while (size > 0) { 5682 if (port < 0x8000) 5683 bitmap = vmcs12->io_bitmap_a; 5684 else if (port < 0x10000) 5685 bitmap = vmcs12->io_bitmap_b; 5686 else 5687 return true; 5688 bitmap += (port & 0x7fff) / 8; 5689 5690 if (last_bitmap != bitmap) 5691 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) 5692 return true; 5693 if (b & (1 << (port & 7))) 5694 return true; 5695 5696 port++; 5697 size--; 5698 last_bitmap = bitmap; 5699 } 5700 5701 return false; 5702 } 5703 5704 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, 5705 struct vmcs12 *vmcs12) 5706 { 5707 unsigned long exit_qualification; 5708 unsigned short port; 5709 int size; 5710 5711 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 5712 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); 5713 5714 exit_qualification = vmx_get_exit_qual(vcpu); 5715 5716 port = exit_qualification >> 16; 5717 size = (exit_qualification & 7) + 1; 5718 5719 return nested_vmx_check_io_bitmaps(vcpu, port, size); 5720 } 5721 5722 /* 5723 * Return 1 if we should exit from L2 to L1 to handle an MSR access, 5724 * rather than handle it ourselves in L0. I.e., check whether L1 expressed 5725 * disinterest in the current event (read or write a specific MSR) by using an 5726 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 5727 */ 5728 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, 5729 struct vmcs12 *vmcs12, 5730 union vmx_exit_reason exit_reason) 5731 { 5732 u32 msr_index = kvm_rcx_read(vcpu); 5733 gpa_t bitmap; 5734 5735 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 5736 return true; 5737 5738 /* 5739 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, 5740 * for the four combinations of read/write and low/high MSR numbers. 5741 * First we need to figure out which of the four to use: 5742 */ 5743 bitmap = vmcs12->msr_bitmap; 5744 if (exit_reason.basic == EXIT_REASON_MSR_WRITE) 5745 bitmap += 2048; 5746 if (msr_index >= 0xc0000000) { 5747 msr_index -= 0xc0000000; 5748 bitmap += 1024; 5749 } 5750 5751 /* Then read the msr_index'th bit from this bitmap: */ 5752 if (msr_index < 1024*8) { 5753 unsigned char b; 5754 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) 5755 return true; 5756 return 1 & (b >> (msr_index & 7)); 5757 } else 5758 return true; /* let L1 handle the wrong parameter */ 5759 } 5760 5761 /* 5762 * Return 1 if we should exit from L2 to L1 to handle a CR access exit, 5763 * rather than handle it ourselves in L0. I.e., check if L1 wanted to 5764 * intercept (via guest_host_mask etc.) the current event. 
5765 */ 5766 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, 5767 struct vmcs12 *vmcs12) 5768 { 5769 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5770 int cr = exit_qualification & 15; 5771 int reg; 5772 unsigned long val; 5773 5774 switch ((exit_qualification >> 4) & 3) { 5775 case 0: /* mov to cr */ 5776 reg = (exit_qualification >> 8) & 15; 5777 val = kvm_register_read(vcpu, reg); 5778 switch (cr) { 5779 case 0: 5780 if (vmcs12->cr0_guest_host_mask & 5781 (val ^ vmcs12->cr0_read_shadow)) 5782 return true; 5783 break; 5784 case 3: 5785 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) 5786 return true; 5787 break; 5788 case 4: 5789 if (vmcs12->cr4_guest_host_mask & 5790 (vmcs12->cr4_read_shadow ^ val)) 5791 return true; 5792 break; 5793 case 8: 5794 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) 5795 return true; 5796 break; 5797 } 5798 break; 5799 case 2: /* clts */ 5800 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && 5801 (vmcs12->cr0_read_shadow & X86_CR0_TS)) 5802 return true; 5803 break; 5804 case 1: /* mov from cr */ 5805 switch (cr) { 5806 case 3: 5807 if (vmcs12->cpu_based_vm_exec_control & 5808 CPU_BASED_CR3_STORE_EXITING) 5809 return true; 5810 break; 5811 case 8: 5812 if (vmcs12->cpu_based_vm_exec_control & 5813 CPU_BASED_CR8_STORE_EXITING) 5814 return true; 5815 break; 5816 } 5817 break; 5818 case 3: /* lmsw */ 5819 /* 5820 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 5821 * cr0. Other attempted changes are ignored, with no exit. 5822 */ 5823 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 5824 if (vmcs12->cr0_guest_host_mask & 0xe & 5825 (val ^ vmcs12->cr0_read_shadow)) 5826 return true; 5827 if ((vmcs12->cr0_guest_host_mask & 0x1) && 5828 !(vmcs12->cr0_read_shadow & 0x1) && 5829 (val & 0x1)) 5830 return true; 5831 break; 5832 } 5833 return false; 5834 } 5835 5836 static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu, 5837 struct vmcs12 *vmcs12) 5838 { 5839 u32 encls_leaf; 5840 5841 if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) || 5842 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING)) 5843 return false; 5844 5845 encls_leaf = kvm_rax_read(vcpu); 5846 if (encls_leaf > 62) 5847 encls_leaf = 63; 5848 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf); 5849 } 5850 5851 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, 5852 struct vmcs12 *vmcs12, gpa_t bitmap) 5853 { 5854 u32 vmx_instruction_info; 5855 unsigned long field; 5856 u8 b; 5857 5858 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 5859 return true; 5860 5861 /* Decode instruction info and find the field to access */ 5862 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5863 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 5864 5865 /* Out-of-range fields always cause a VM exit from L2 to L1 */ 5866 if (field >> 15) 5867 return true; 5868 5869 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) 5870 return true; 5871 5872 return 1 & (b >> (field & 7)); 5873 } 5874 5875 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12) 5876 { 5877 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field; 5878 5879 if (nested_cpu_has_mtf(vmcs12)) 5880 return true; 5881 5882 /* 5883 * An MTF VM-exit may be injected into the guest by setting the 5884 * interruption-type to 7 (other event) and the vector field to 0. Such 5885 * is the case regardless of the 'monitor trap flag' VM-execution 5886 * control. 
5887 */ 5888 return entry_intr_info == (INTR_INFO_VALID_MASK 5889 | INTR_TYPE_OTHER_EVENT); 5890 } 5891 5892 /* 5893 * Return true if L0 wants to handle an exit from L2 regardless of whether or not 5894 * L1 wants the exit. Only call this when in is_guest_mode (L2). 5895 */ 5896 static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, 5897 union vmx_exit_reason exit_reason) 5898 { 5899 u32 intr_info; 5900 5901 switch ((u16)exit_reason.basic) { 5902 case EXIT_REASON_EXCEPTION_NMI: 5903 intr_info = vmx_get_intr_info(vcpu); 5904 if (is_nmi(intr_info)) 5905 return true; 5906 else if (is_page_fault(intr_info)) 5907 return vcpu->arch.apf.host_apf_flags || 5908 vmx_need_pf_intercept(vcpu); 5909 else if (is_debug(intr_info) && 5910 vcpu->guest_debug & 5911 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 5912 return true; 5913 else if (is_breakpoint(intr_info) && 5914 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 5915 return true; 5916 else if (is_alignment_check(intr_info) && 5917 !vmx_guest_inject_ac(vcpu)) 5918 return true; 5919 return false; 5920 case EXIT_REASON_EXTERNAL_INTERRUPT: 5921 return true; 5922 case EXIT_REASON_MCE_DURING_VMENTRY: 5923 return true; 5924 case EXIT_REASON_EPT_VIOLATION: 5925 /* 5926 * L0 always deals with the EPT violation. If nested EPT is 5927 * used, and the nested mmu code discovers that the address is 5928 * missing in the guest EPT table (EPT12), the EPT violation 5929 * will be injected with nested_ept_inject_page_fault() 5930 */ 5931 return true; 5932 case EXIT_REASON_EPT_MISCONFIG: 5933 /* 5934 * L2 never uses directly L1's EPT, but rather L0's own EPT 5935 * table (shadow on EPT) or a merged EPT table that L0 built 5936 * (EPT on EPT). So any problems with the structure of the 5937 * table is L0's fault. 5938 */ 5939 return true; 5940 case EXIT_REASON_PREEMPTION_TIMER: 5941 return true; 5942 case EXIT_REASON_PML_FULL: 5943 /* 5944 * PML is emulated for an L1 VMM and should never be enabled in 5945 * vmcs02, always "handle" PML_FULL by exiting to userspace. 5946 */ 5947 return true; 5948 case EXIT_REASON_VMFUNC: 5949 /* VM functions are emulated through L2->L0 vmexits. */ 5950 return true; 5951 case EXIT_REASON_BUS_LOCK: 5952 /* 5953 * At present, bus lock VM exit is never exposed to L1. 5954 * Handle L2's bus locks in L0 directly. 5955 */ 5956 return true; 5957 default: 5958 break; 5959 } 5960 return false; 5961 } 5962 5963 /* 5964 * Return 1 if L1 wants to intercept an exit from L2. Only call this when in 5965 * is_guest_mode (L2). 
5966 */ 5967 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, 5968 union vmx_exit_reason exit_reason) 5969 { 5970 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5971 u32 intr_info; 5972 5973 switch ((u16)exit_reason.basic) { 5974 case EXIT_REASON_EXCEPTION_NMI: 5975 intr_info = vmx_get_intr_info(vcpu); 5976 if (is_nmi(intr_info)) 5977 return true; 5978 else if (is_page_fault(intr_info)) 5979 return true; 5980 return vmcs12->exception_bitmap & 5981 (1u << (intr_info & INTR_INFO_VECTOR_MASK)); 5982 case EXIT_REASON_EXTERNAL_INTERRUPT: 5983 return nested_exit_on_intr(vcpu); 5984 case EXIT_REASON_TRIPLE_FAULT: 5985 return true; 5986 case EXIT_REASON_INTERRUPT_WINDOW: 5987 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING); 5988 case EXIT_REASON_NMI_WINDOW: 5989 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING); 5990 case EXIT_REASON_TASK_SWITCH: 5991 return true; 5992 case EXIT_REASON_CPUID: 5993 return true; 5994 case EXIT_REASON_HLT: 5995 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); 5996 case EXIT_REASON_INVD: 5997 return true; 5998 case EXIT_REASON_INVLPG: 5999 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 6000 case EXIT_REASON_RDPMC: 6001 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); 6002 case EXIT_REASON_RDRAND: 6003 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); 6004 case EXIT_REASON_RDSEED: 6005 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); 6006 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: 6007 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); 6008 case EXIT_REASON_VMREAD: 6009 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 6010 vmcs12->vmread_bitmap); 6011 case EXIT_REASON_VMWRITE: 6012 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 6013 vmcs12->vmwrite_bitmap); 6014 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: 6015 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: 6016 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: 6017 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 6018 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: 6019 /* 6020 * VMX instructions trap unconditionally. This allows L1 to 6021 * emulate them for its L2 guest, i.e., allows 3-level nesting! 
6022 */ 6023 return true; 6024 case EXIT_REASON_CR_ACCESS: 6025 return nested_vmx_exit_handled_cr(vcpu, vmcs12); 6026 case EXIT_REASON_DR_ACCESS: 6027 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); 6028 case EXIT_REASON_IO_INSTRUCTION: 6029 return nested_vmx_exit_handled_io(vcpu, vmcs12); 6030 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: 6031 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); 6032 case EXIT_REASON_MSR_READ: 6033 case EXIT_REASON_MSR_WRITE: 6034 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); 6035 case EXIT_REASON_INVALID_STATE: 6036 return true; 6037 case EXIT_REASON_MWAIT_INSTRUCTION: 6038 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); 6039 case EXIT_REASON_MONITOR_TRAP_FLAG: 6040 return nested_vmx_exit_handled_mtf(vmcs12); 6041 case EXIT_REASON_MONITOR_INSTRUCTION: 6042 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); 6043 case EXIT_REASON_PAUSE_INSTRUCTION: 6044 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || 6045 nested_cpu_has2(vmcs12, 6046 SECONDARY_EXEC_PAUSE_LOOP_EXITING); 6047 case EXIT_REASON_MCE_DURING_VMENTRY: 6048 return true; 6049 case EXIT_REASON_TPR_BELOW_THRESHOLD: 6050 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); 6051 case EXIT_REASON_APIC_ACCESS: 6052 case EXIT_REASON_APIC_WRITE: 6053 case EXIT_REASON_EOI_INDUCED: 6054 /* 6055 * The controls for "virtualize APIC accesses," "APIC- 6056 * register virtualization," and "virtual-interrupt 6057 * delivery" only come from vmcs12. 6058 */ 6059 return true; 6060 case EXIT_REASON_INVPCID: 6061 return 6062 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && 6063 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 6064 case EXIT_REASON_WBINVD: 6065 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); 6066 case EXIT_REASON_XSETBV: 6067 return true; 6068 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: 6069 /* 6070 * This should never happen, since it is not possible to 6071 * set XSS to a non-zero value---neither in L1 nor in L2. 6072 * If it were, XSS would have to be checked against 6073 * the XSS exit bitmap in vmcs12. 6074 */ 6075 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); 6076 case EXIT_REASON_UMWAIT: 6077 case EXIT_REASON_TPAUSE: 6078 return nested_cpu_has2(vmcs12, 6079 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE); 6080 case EXIT_REASON_ENCLS: 6081 return nested_vmx_exit_handled_encls(vcpu, vmcs12); 6082 default: 6083 return true; 6084 } 6085 } 6086 6087 /* 6088 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was 6089 * reflected into L1. 6090 */ 6091 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) 6092 { 6093 struct vcpu_vmx *vmx = to_vmx(vcpu); 6094 union vmx_exit_reason exit_reason = vmx->exit_reason; 6095 unsigned long exit_qual; 6096 u32 exit_intr_info; 6097 6098 WARN_ON_ONCE(vmx->nested.nested_run_pending); 6099 6100 /* 6101 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM 6102 * has already loaded L2's state. 6103 */ 6104 if (unlikely(vmx->fail)) { 6105 trace_kvm_nested_vmenter_failed( 6106 "hardware VM-instruction error: ", 6107 vmcs_read32(VM_INSTRUCTION_ERROR)); 6108 exit_intr_info = 0; 6109 exit_qual = 0; 6110 goto reflect_vmexit; 6111 } 6112 6113 trace_kvm_nested_vmexit(vcpu, KVM_ISA_VMX); 6114 6115 /* If L0 (KVM) wants the exit, it trumps L1's desires. */ 6116 if (nested_vmx_l0_wants_exit(vcpu, exit_reason)) 6117 return false; 6118 6119 /* If L1 doesn't want the exit, handle it in L0.
*/ 6120 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason)) 6121 return false; 6122 6123 /* 6124 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For 6125 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would 6126 * need to be synthesized by querying the in-kernel LAPIC, but external 6127 * interrupts are never reflected to L1 so it's a non-issue. 6128 */ 6129 exit_intr_info = vmx_get_intr_info(vcpu); 6130 if (is_exception_with_error_code(exit_intr_info)) { 6131 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6132 6133 vmcs12->vm_exit_intr_error_code = 6134 vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 6135 } 6136 exit_qual = vmx_get_exit_qual(vcpu); 6137 6138 reflect_vmexit: 6139 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual); 6140 return true; 6141 } 6142 6143 static int vmx_get_nested_state(struct kvm_vcpu *vcpu, 6144 struct kvm_nested_state __user *user_kvm_nested_state, 6145 u32 user_data_size) 6146 { 6147 struct vcpu_vmx *vmx; 6148 struct vmcs12 *vmcs12; 6149 struct kvm_nested_state kvm_state = { 6150 .flags = 0, 6151 .format = KVM_STATE_NESTED_FORMAT_VMX, 6152 .size = sizeof(kvm_state), 6153 .hdr.vmx.flags = 0, 6154 .hdr.vmx.vmxon_pa = INVALID_GPA, 6155 .hdr.vmx.vmcs12_pa = INVALID_GPA, 6156 .hdr.vmx.preemption_timer_deadline = 0, 6157 }; 6158 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6159 &user_kvm_nested_state->data.vmx[0]; 6160 6161 if (!vcpu) 6162 return kvm_state.size + sizeof(*user_vmx_nested_state); 6163 6164 vmx = to_vmx(vcpu); 6165 vmcs12 = get_vmcs12(vcpu); 6166 6167 if (nested_vmx_allowed(vcpu) && 6168 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { 6169 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; 6170 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; 6171 6172 if (vmx_has_valid_vmcs12(vcpu)) { 6173 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); 6174 6175 /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */ 6176 if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID) 6177 kvm_state.flags |= KVM_STATE_NESTED_EVMCS; 6178 6179 if (is_guest_mode(vcpu) && 6180 nested_cpu_has_shadow_vmcs(vmcs12) && 6181 vmcs12->vmcs_link_pointer != INVALID_GPA) 6182 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); 6183 } 6184 6185 if (vmx->nested.smm.vmxon) 6186 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; 6187 6188 if (vmx->nested.smm.guest_mode) 6189 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; 6190 6191 if (is_guest_mode(vcpu)) { 6192 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; 6193 6194 if (vmx->nested.nested_run_pending) 6195 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; 6196 6197 if (vmx->nested.mtf_pending) 6198 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING; 6199 6200 if (nested_cpu_has_preemption_timer(vmcs12) && 6201 vmx->nested.has_preemption_timer_deadline) { 6202 kvm_state.hdr.vmx.flags |= 6203 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE; 6204 kvm_state.hdr.vmx.preemption_timer_deadline = 6205 vmx->nested.preemption_timer_deadline; 6206 } 6207 } 6208 } 6209 6210 if (user_data_size < kvm_state.size) 6211 goto out; 6212 6213 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) 6214 return -EFAULT; 6215 6216 if (!vmx_has_valid_vmcs12(vcpu)) 6217 goto out; 6218 6219 /* 6220 * When running L2, the authoritative vmcs12 state is in the 6221 * vmcs02. 
When running L1, the authoritative vmcs12 state is 6222 * in the shadow or enlightened vmcs linked to vmcs01, unless 6223 * need_vmcs12_to_shadow_sync is set, in which case the authoritative 6224 * vmcs12 state is in the vmcs12 already. 6225 */ 6226 if (is_guest_mode(vcpu)) { 6227 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 6228 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 6229 } else { 6230 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 6231 if (!vmx->nested.need_vmcs12_to_shadow_sync) { 6232 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 6233 /* 6234 * The L1 hypervisor is not obliged to keep the eVMCS 6235 * clean-fields data up-to-date while not in guest 6236 * mode; 'hv_clean_fields' is only guaranteed to be 6237 * accurate at VM-Entry, so ignore it here and do a 6238 * full copy. 6239 */ 6240 copy_enlightened_to_vmcs12(vmx, 0); 6241 else if (enable_shadow_vmcs) 6242 copy_shadow_to_vmcs12(vmx); 6243 } 6244 } 6245 6246 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); 6247 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE); 6248 6249 /* 6250 * Copy over the full allocated size of vmcs12 rather than just the size 6251 * of the struct. 6252 */ 6253 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE)) 6254 return -EFAULT; 6255 6256 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6257 vmcs12->vmcs_link_pointer != INVALID_GPA) { 6258 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12, 6259 get_shadow_vmcs12(vcpu), VMCS12_SIZE)) 6260 return -EFAULT; 6261 } 6262 out: 6263 return kvm_state.size; 6264 } 6265 6266 /* 6267 * Forcibly leave nested mode in order to be able to reset the VCPU later on. 6268 */ 6269 void vmx_leave_nested(struct kvm_vcpu *vcpu) 6270 { 6271 if (is_guest_mode(vcpu)) { 6272 to_vmx(vcpu)->nested.nested_run_pending = 0; 6273 nested_vmx_vmexit(vcpu, -1, 0, 0); 6274 } 6275 free_nested(vcpu); 6276 } 6277 6278 static int vmx_set_nested_state(struct kvm_vcpu *vcpu, 6279 struct kvm_nested_state __user *user_kvm_nested_state, 6280 struct kvm_nested_state *kvm_state) 6281 { 6282 struct vcpu_vmx *vmx = to_vmx(vcpu); 6283 struct vmcs12 *vmcs12; 6284 enum vm_entry_failure_code ignored; 6285 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6286 &user_kvm_nested_state->data.vmx[0]; 6287 int ret; 6288 6289 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX) 6290 return -EINVAL; 6291 6292 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) { 6293 if (kvm_state->hdr.vmx.smm.flags) 6294 return -EINVAL; 6295 6296 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) 6297 return -EINVAL; 6298 6299 /* 6300 * KVM_STATE_NESTED_EVMCS used to signal that KVM should 6301 * enable the eVMCS capability on the vCPU. However, the 6302 * code was later changed so that the flag signals that 6303 * vmcs12 should be copied into the eVMCS in guest memory. 6304 * 6305 * To preserve backwards compatibility, allow userspace 6306 * to set this flag even when there is no VMXON region.
6307 */ 6308 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) 6309 return -EINVAL; 6310 } else { 6311 if (!nested_vmx_allowed(vcpu)) 6312 return -EINVAL; 6313 6314 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) 6315 return -EINVAL; 6316 } 6317 6318 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6319 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6320 return -EINVAL; 6321 6322 if (kvm_state->hdr.vmx.smm.flags & 6323 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) 6324 return -EINVAL; 6325 6326 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) 6327 return -EINVAL; 6328 6329 /* 6330 * SMM temporarily disables VMX, so we cannot be in guest mode, 6331 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags 6332 * must be zero. 6333 */ 6334 if (is_smm(vcpu) ? 6335 (kvm_state->flags & 6336 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING)) 6337 : kvm_state->hdr.vmx.smm.flags) 6338 return -EINVAL; 6339 6340 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6341 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) 6342 return -EINVAL; 6343 6344 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && 6345 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) 6346 return -EINVAL; 6347 6348 vmx_leave_nested(vcpu); 6349 6350 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) 6351 return 0; 6352 6353 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; 6354 ret = enter_vmx_operation(vcpu); 6355 if (ret) 6356 return ret; 6357 6358 /* Empty 'VMXON' state is permitted if no VMCS loaded */ 6359 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) { 6360 /* See vmx_has_valid_vmcs12. */ 6361 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || 6362 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || 6363 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)) 6364 return -EINVAL; 6365 else 6366 return 0; 6367 } 6368 6369 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) { 6370 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || 6371 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) 6372 return -EINVAL; 6373 6374 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); 6375 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { 6376 /* 6377 * nested_vmx_handle_enlightened_vmptrld() cannot be called 6378 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be 6379 * restored yet. EVMCS will be mapped from 6380 * nested_get_vmcs12_pages(). 
6381 */ 6382 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING; 6383 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 6384 } else { 6385 return -EINVAL; 6386 } 6387 6388 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { 6389 vmx->nested.smm.vmxon = true; 6390 vmx->nested.vmxon = false; 6391 6392 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) 6393 vmx->nested.smm.guest_mode = true; 6394 } 6395 6396 vmcs12 = get_vmcs12(vcpu); 6397 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12))) 6398 return -EFAULT; 6399 6400 if (vmcs12->hdr.revision_id != VMCS12_REVISION) 6401 return -EINVAL; 6402 6403 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6404 return 0; 6405 6406 vmx->nested.nested_run_pending = 6407 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); 6408 6409 vmx->nested.mtf_pending = 6410 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); 6411 6412 ret = -EINVAL; 6413 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6414 vmcs12->vmcs_link_pointer != INVALID_GPA) { 6415 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 6416 6417 if (kvm_state->size < 6418 sizeof(*kvm_state) + 6419 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) 6420 goto error_guest_mode; 6421 6422 if (copy_from_user(shadow_vmcs12, 6423 user_vmx_nested_state->shadow_vmcs12, 6424 sizeof(*shadow_vmcs12))) { 6425 ret = -EFAULT; 6426 goto error_guest_mode; 6427 } 6428 6429 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 6430 !shadow_vmcs12->hdr.shadow_vmcs) 6431 goto error_guest_mode; 6432 } 6433 6434 vmx->nested.has_preemption_timer_deadline = false; 6435 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { 6436 vmx->nested.has_preemption_timer_deadline = true; 6437 vmx->nested.preemption_timer_deadline = 6438 kvm_state->hdr.vmx.preemption_timer_deadline; 6439 } 6440 6441 if (nested_vmx_check_controls(vcpu, vmcs12) || 6442 nested_vmx_check_host_state(vcpu, vmcs12) || 6443 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored)) 6444 goto error_guest_mode; 6445 6446 vmx->nested.dirty_vmcs12 = true; 6447 vmx->nested.force_msr_bitmap_recalc = true; 6448 ret = nested_vmx_enter_non_root_mode(vcpu, false); 6449 if (ret) 6450 goto error_guest_mode; 6451 6452 return 0; 6453 6454 error_guest_mode: 6455 vmx->nested.nested_run_pending = 0; 6456 return ret; 6457 } 6458 6459 void nested_vmx_set_vmcs_shadowing_bitmap(void) 6460 { 6461 if (enable_shadow_vmcs) { 6462 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); 6463 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); 6464 } 6465 } 6466 6467 /* 6468 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo 6469 * that madness to get the encoding for comparison. 6470 */ 6471 #define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10))) 6472 6473 static u64 nested_vmx_calc_vmcs_enum_msr(void) 6474 { 6475 /* 6476 * Note these are the so called "index" of the VMCS field encoding, not 6477 * the index into vmcs12. 6478 */ 6479 unsigned int max_idx, idx; 6480 int i; 6481 6482 /* 6483 * For better or worse, KVM allows VMREAD/VMWRITE to all fields in 6484 * vmcs12, regardless of whether or not the associated feature is 6485 * exposed to L1. Simply find the field with the highest index. 6486 */ 6487 max_idx = 0; 6488 for (i = 0; i < nr_vmcs12_fields; i++) { 6489 /* The vmcs12 table is very, very sparsely populated. 
*/ 6490 if (!vmcs12_field_offsets[i]) 6491 continue; 6492 6493 idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i)); 6494 if (idx > max_idx) 6495 max_idx = idx; 6496 } 6497 6498 return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT; 6499 } 6500 6501 /* 6502 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be 6503 * returned for the various VMX controls MSRs when nested VMX is enabled. 6504 * The same values should also be used to verify that vmcs12 control fields are 6505 * valid during nested entry from L1 to L2. 6506 * Each of these control msrs has a low and high 32-bit half: A low bit is on 6507 * if the corresponding bit in the (32-bit) control field *must* be on, and a 6508 * bit in the high half is on if the corresponding bit in the control field 6509 * may be on. See also vmx_control_verify(). 6510 */ 6511 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) 6512 { 6513 /* 6514 * Note that as a general rule, the high half of the MSRs (bits in 6515 * the control fields which may be 1) should be initialized by the 6516 * intersection of the underlying hardware's MSR (i.e., features which 6517 * can be supported) and the list of features we want to expose - 6518 * because they are known to be properly supported in our code. 6519 * Also, usually, the low half of the MSRs (bits which must be 1) can 6520 * be set to 0, meaning that L1 may turn off any of these bits. The 6521 * reason is that if one of these bits is necessary, it will appear 6522 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control 6523 * fields of vmcs01 and vmcs02, will turn these bits off - and 6524 * nested_vmx_l1_wants_exit() will not pass related exits to L1. 6525 * These rules have exceptions below. 6526 */ 6527 6528 /* pin-based controls */ 6529 rdmsr(MSR_IA32_VMX_PINBASED_CTLS, 6530 msrs->pinbased_ctls_low, 6531 msrs->pinbased_ctls_high); 6532 msrs->pinbased_ctls_low |= 6533 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6534 msrs->pinbased_ctls_high &= 6535 PIN_BASED_EXT_INTR_MASK | 6536 PIN_BASED_NMI_EXITING | 6537 PIN_BASED_VIRTUAL_NMIS | 6538 (enable_apicv ? PIN_BASED_POSTED_INTR : 0); 6539 msrs->pinbased_ctls_high |= 6540 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6541 PIN_BASED_VMX_PREEMPTION_TIMER; 6542 6543 /* exit controls */ 6544 rdmsr(MSR_IA32_VMX_EXIT_CTLS, 6545 msrs->exit_ctls_low, 6546 msrs->exit_ctls_high); 6547 msrs->exit_ctls_low = 6548 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 6549 6550 msrs->exit_ctls_high &= 6551 #ifdef CONFIG_X86_64 6552 VM_EXIT_HOST_ADDR_SPACE_SIZE | 6553 #endif 6554 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT | 6555 VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 6556 msrs->exit_ctls_high |= 6557 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | 6558 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 6559 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; 6560 6561 /* We support free control of debug control saving. */ 6562 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; 6563 6564 /* entry controls */ 6565 rdmsr(MSR_IA32_VMX_ENTRY_CTLS, 6566 msrs->entry_ctls_low, 6567 msrs->entry_ctls_high); 6568 msrs->entry_ctls_low = 6569 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 6570 msrs->entry_ctls_high &= 6571 #ifdef CONFIG_X86_64 6572 VM_ENTRY_IA32E_MODE | 6573 #endif 6574 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS | 6575 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 6576 msrs->entry_ctls_high |= 6577 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); 6578 6579 /* We support free control of debug control loading. 
*/ 6580 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; 6581 6582 /* cpu-based controls */ 6583 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, 6584 msrs->procbased_ctls_low, 6585 msrs->procbased_ctls_high); 6586 msrs->procbased_ctls_low = 6587 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6588 msrs->procbased_ctls_high &= 6589 CPU_BASED_INTR_WINDOW_EXITING | 6590 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING | 6591 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | 6592 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | 6593 CPU_BASED_CR3_STORE_EXITING | 6594 #ifdef CONFIG_X86_64 6595 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | 6596 #endif 6597 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | 6598 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | 6599 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | 6600 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | 6601 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 6602 /* 6603 * We can allow some features even when not supported by the 6604 * hardware. For example, L1 can specify an MSR bitmap - and we 6605 * can use it to avoid exits to L1 - even when L0 runs L2 6606 * without MSR bitmaps. 6607 */ 6608 msrs->procbased_ctls_high |= 6609 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6610 CPU_BASED_USE_MSR_BITMAPS; 6611 6612 /* We support free control of CR3 access interception. */ 6613 msrs->procbased_ctls_low &= 6614 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); 6615 6616 /* 6617 * secondary cpu-based controls. Do not include those that 6618 * depend on CPUID bits, they are added later by 6619 * vmx_vcpu_after_set_cpuid. 6620 */ 6621 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) 6622 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 6623 msrs->secondary_ctls_low, 6624 msrs->secondary_ctls_high); 6625 6626 msrs->secondary_ctls_low = 0; 6627 msrs->secondary_ctls_high &= 6628 SECONDARY_EXEC_DESC | 6629 SECONDARY_EXEC_ENABLE_RDTSCP | 6630 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 6631 SECONDARY_EXEC_WBINVD_EXITING | 6632 SECONDARY_EXEC_APIC_REGISTER_VIRT | 6633 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 6634 SECONDARY_EXEC_RDRAND_EXITING | 6635 SECONDARY_EXEC_ENABLE_INVPCID | 6636 SECONDARY_EXEC_RDSEED_EXITING | 6637 SECONDARY_EXEC_XSAVES | 6638 SECONDARY_EXEC_TSC_SCALING; 6639 6640 /* 6641 * We can emulate "VMCS shadowing," even if the hardware 6642 * doesn't support it. 
6643 */ 6644 msrs->secondary_ctls_high |= 6645 SECONDARY_EXEC_SHADOW_VMCS; 6646 6647 if (enable_ept) { 6648 /* nested EPT: emulate EPT also to L1 */ 6649 msrs->secondary_ctls_high |= 6650 SECONDARY_EXEC_ENABLE_EPT; 6651 msrs->ept_caps = 6652 VMX_EPT_PAGE_WALK_4_BIT | 6653 VMX_EPT_PAGE_WALK_5_BIT | 6654 VMX_EPTP_WB_BIT | 6655 VMX_EPT_INVEPT_BIT | 6656 VMX_EPT_EXECUTE_ONLY_BIT; 6657 6658 msrs->ept_caps &= ept_caps; 6659 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | 6660 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | 6661 VMX_EPT_1GB_PAGE_BIT; 6662 if (enable_ept_ad_bits) { 6663 msrs->secondary_ctls_high |= 6664 SECONDARY_EXEC_ENABLE_PML; 6665 msrs->ept_caps |= VMX_EPT_AD_BIT; 6666 } 6667 } 6668 6669 if (cpu_has_vmx_vmfunc()) { 6670 msrs->secondary_ctls_high |= 6671 SECONDARY_EXEC_ENABLE_VMFUNC; 6672 /* 6673 * Advertise EPTP switching unconditionally 6674 * since we emulate it 6675 */ 6676 if (enable_ept) 6677 msrs->vmfunc_controls = 6678 VMX_VMFUNC_EPTP_SWITCHING; 6679 } 6680 6681 /* 6682 * Old versions of KVM use the single-context version without 6683 * checking for support, so declare that it is supported even 6684 * though it is treated as global context. The alternative is 6685 * not failing the single-context invvpid, and it is worse. 6686 */ 6687 if (enable_vpid) { 6688 msrs->secondary_ctls_high |= 6689 SECONDARY_EXEC_ENABLE_VPID; 6690 msrs->vpid_caps = VMX_VPID_INVVPID_BIT | 6691 VMX_VPID_EXTENT_SUPPORTED_MASK; 6692 } 6693 6694 if (enable_unrestricted_guest) 6695 msrs->secondary_ctls_high |= 6696 SECONDARY_EXEC_UNRESTRICTED_GUEST; 6697 6698 if (flexpriority_enabled) 6699 msrs->secondary_ctls_high |= 6700 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 6701 6702 if (enable_sgx) 6703 msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING; 6704 6705 /* miscellaneous data */ 6706 rdmsr(MSR_IA32_VMX_MISC, 6707 msrs->misc_low, 6708 msrs->misc_high); 6709 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA; 6710 msrs->misc_low |= 6711 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | 6712 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | 6713 VMX_MISC_ACTIVITY_HLT | 6714 VMX_MISC_ACTIVITY_WAIT_SIPI; 6715 msrs->misc_high = 0; 6716 6717 /* 6718 * This MSR reports some information about VMX support. We 6719 * should return information about the VMX we emulate for the 6720 * guest, and the VMCS structure we give it - not about the 6721 * VMX support of the underlying hardware. 6722 */ 6723 msrs->basic = 6724 VMCS12_REVISION | 6725 VMX_BASIC_TRUE_CTLS | 6726 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | 6727 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); 6728 6729 if (cpu_has_vmx_basic_inout()) 6730 msrs->basic |= VMX_BASIC_INOUT; 6731 6732 /* 6733 * These MSRs specify bits which the guest must keep fixed on 6734 * while L1 is in VMXON mode (in L1's root mode, or running an L2). 6735 * We picked the standard core2 setting. 6736 */ 6737 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) 6738 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE 6739 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; 6740 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; 6741 6742 /* These MSRs specify bits which the guest must keep fixed off. 
*/ 6743 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); 6744 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); 6745 6746 msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr(); 6747 } 6748 6749 void nested_vmx_hardware_unsetup(void) 6750 { 6751 int i; 6752 6753 if (enable_shadow_vmcs) { 6754 for (i = 0; i < VMX_BITMAP_NR; i++) 6755 free_page((unsigned long)vmx_bitmap[i]); 6756 } 6757 } 6758 6759 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) 6760 { 6761 int i; 6762 6763 if (!cpu_has_vmx_shadow_vmcs()) 6764 enable_shadow_vmcs = 0; 6765 if (enable_shadow_vmcs) { 6766 for (i = 0; i < VMX_BITMAP_NR; i++) { 6767 /* 6768 * The vmx_bitmap is not tied to a VM and so should 6769 * not be charged to a memcg. 6770 */ 6771 vmx_bitmap[i] = (unsigned long *) 6772 __get_free_page(GFP_KERNEL); 6773 if (!vmx_bitmap[i]) { 6774 nested_vmx_hardware_unsetup(); 6775 return -ENOMEM; 6776 } 6777 } 6778 6779 init_vmcs_shadow_fields(); 6780 } 6781 6782 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear; 6783 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch; 6784 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld; 6785 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst; 6786 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread; 6787 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume; 6788 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite; 6789 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff; 6790 exit_handlers[EXIT_REASON_VMON] = handle_vmon; 6791 exit_handlers[EXIT_REASON_INVEPT] = handle_invept; 6792 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid; 6793 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc; 6794 6795 return 0; 6796 } 6797 6798 struct kvm_x86_nested_ops vmx_nested_ops = { 6799 .leave_nested = vmx_leave_nested, 6800 .check_events = vmx_check_nested_events, 6801 .hv_timer_pending = nested_vmx_preemption_timer_pending, 6802 .triple_fault = nested_vmx_triple_fault, 6803 .get_state = vmx_get_nested_state, 6804 .set_state = vmx_set_nested_state, 6805 .get_nested_state_pages = vmx_get_nested_state_pages, 6806 .write_log_dirty = nested_vmx_write_pml_buffer, 6807 .enable_evmcs = nested_enable_evmcs, 6808 .get_evmcs_version = nested_get_evmcs_version, 6809 }; 6810
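The following standalone sketches are illustrative only and are not part of the kernel source; each assumes only the C standard headers it includes, and every helper name in them is invented for the example. This first sketch restates the MSR-bitmap lookup performed by nested_vmx_exit_handled_msr() earlier in the listing: a single 4 KiB page holds four 1024-byte bitmaps (read-low, read-high, write-low, write-high), and the sketch does the same byte/bit arithmetic against a plain host buffer, whereas the kernel reads the byte from guest memory with kvm_vcpu_read_guest().

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Layout mirrored from nested_vmx_exit_handled_msr(): bytes 0-1023 cover
 * reads of MSRs 0x00000000-0x00001fff, bytes 1024-2047 cover reads of
 * 0xc0000000-0xc0001fff, and the write bitmaps follow at offset 2048.
 */
static bool msr_bitmap_wants_exit(const uint8_t bitmap[4096],
				  uint32_t msr_index, bool is_write)
{
	size_t base = is_write ? 2048 : 0;

	if (msr_index >= 0xc0000000) {
		msr_index -= 0xc0000000;
		base += 1024;
	}

	/* MSRs outside both covered ranges unconditionally exit to L1. */
	if (msr_index >= 1024 * 8)
		return true;

	return bitmap[base + msr_index / 8] & (1u << (msr_index & 7));
}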
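In the same illustrative spirit, this sketch mirrors the loop in nested_vmx_check_io_bitmaps(): I/O bitmap A covers ports 0x0000-0x7fff, bitmap B covers 0x8000-0xffff, and a multi-byte access exits to L1 if any covered port's bit is set. The buffer-based interface is an assumption of the example; the kernel fetches each byte from guest memory instead.

#include <stdbool.h>
#include <stdint.h>

static bool io_bitmaps_want_exit(const uint8_t bitmap_a[0x1000],
				 const uint8_t bitmap_b[0x1000],
				 unsigned int port, int size)
{
	while (size > 0) {
		const uint8_t *bitmap;

		if (port < 0x8000)
			bitmap = bitmap_a;
		else if (port < 0x10000)
			bitmap = bitmap_b;
		else
			return true;	/* accesses that run past port 0xffff always exit */

		/* One bit per port; each bitmap spans 0x8000 ports, i.e. 4 KiB. */
		if (bitmap[(port & 0x7fff) / 8] & (1u << (port & 7)))
			return true;

		port++;
		size--;
	}

	return false;
}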
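The comment above nested_vmx_setup_ctls_msrs() explains the low/high convention for the VMX controls MSRs: a low bit means the corresponding control bit must be 1, a high bit means it may be 1. The check below is a minimal sketch of that rule in the spirit of the vmx_control_verify() helper the comment references; the function name used here is made up, and the kernel's actual helper may differ in detail. It would be applied, for example, to a pin-based control value proposed by L1 in vmcs12, validated against msrs->pinbased_ctls_low/high.

#include <stdbool.h>
#include <stdint.h>

/*
 * A requested control-field value is acceptable iff every "must be 1" bit
 * (low half of the MSR) is set and no bit outside the "may be 1" mask
 * (high half of the MSR) is set.
 */
static bool control_value_is_valid(uint32_t control, uint32_t low, uint32_t high)
{
	return (control & low) == low && (control & high) == control;
}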
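VMCS12_IDX_TO_ENC() undoes the rotate-left-by-6 that turns a VMCS field encoding into an index into vmcs12_field_offsets[]. For illustration, the two helpers below show the 16-bit rotation and its inverse; both names are invented, and only the second corresponds to the macro in the listing.

#include <stdint.h>

/* Encoding -> table index: rotate the 16-bit encoding left by 6. */
static inline uint16_t vmcs12_enc_to_idx(uint16_t encoding)
{
	return (uint16_t)((encoding << 6) | (encoding >> 10));
}

/* Table index -> encoding: rotate right by 6, matching VMCS12_IDX_TO_ENC(). */
static inline uint16_t vmcs12_idx_to_enc(uint16_t index)
{
	return (uint16_t)((index >> 6) | (index << 10));
}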
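Finally, the cr0_fixed0/cr0_fixed1 and cr4_fixed0/cr4_fixed1 values set up at the end of nested_vmx_setup_ctls_msrs() follow the architectural FIXED0/FIXED1 convention: bits set in FIXED0 must be 1, and bits clear in FIXED1 must be 0, for as long as the guest stays in VMX operation. The sketch below expresses that rule as a predicate; it is not the kernel's helper, and the name is invented for the example.

#include <stdbool.h>
#include <stdint.h>

static bool cr_value_allowed_in_vmx(uint64_t val, uint64_t fixed0, uint64_t fixed1)
{
	/* All FIXED0 bits set, and no bit set that FIXED1 forbids. */
	return (val & fixed0) == fixed0 && (val & ~fixed1) == 0;
}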