// SPDX-License-Identifier: GPL-2.0

#include <linux/objtool.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "sgx.h"
#include "trace.h"
#include "vmx.h"
#include "x86.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])

struct shadow_vmcs_field {
	u16 encoding;
	u16 offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_only_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_only_fields[j++] = entry;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_write_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
			  field <= GUEST_TR_AR_BYTES,
			  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
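		 * Such fields are dropped from the shadow list below; their
		 * bits stay set in the VMREAD/VMWRITE bitmaps, so L1's
		 * accesses to them cause a VM-Exit and are emulated instead
		 * of hitting the shadow VMCS.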
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
	}
	max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
		& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force sync to shadow VMCS because
	 * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
	 * fields and thus must be synced.
	 */
	if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
		to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;

	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == -1ull &&
	    !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
		return nested_vmx_failInvalid(vcpu);

	return nested_vmx_failValid(vcpu, vm_instruction_error);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: don't just reset the guest here. */
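	/*
	 * A VMX abort on real hardware would shut down the logical processor;
	 * approximate that by synthesizing a triple fault and logging the
	 * abort indicator.
	 */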
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
	vmx->nested.need_vmcs12_to_shadow_sync = false;
}

static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
		kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
		vmx->nested.hv_evmcs = NULL;
	}

	vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
}

static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
				     struct loaded_vmcs *prev)
{
	struct vmcs_host_state *dest, *src;

	if (unlikely(!vmx->guest_state_loaded))
		return;

	src = &prev->host_state;
	dest = &vmx->loaded_vmcs->host_state;

	vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
	dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
	dest->ds_sel = src->ds_sel;
	dest->es_sel = src->es_sel;
#endif
}

static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *prev;
	int cpu;

	if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
		return;

	cpu = get_cpu();
	prev = vmx->loaded_vmcs;
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load_vmcs(vcpu, cpu, prev);
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

	vmx_register_cache_reset(vcpu);
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
		vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	vmx->nested.cached_vmcs12 = NULL;
	kfree(vmx->nested.cached_shadow_vmcs12);
	vmx->nested.cached_shadow_vmcs12 = NULL;
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_clean(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vcpu_put(vcpu);
}

#define EPTP_PA_MASK GENMASK_ULL(51, 12)

static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
{
	return VALID_PAGE(root_hpa) &&
	       ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
}

static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
				       gpa_t addr)
{
	uint i;
	struct kvm_mmu_root_info *cached_root;

	WARN_ON_ONCE(!mmu_is_nested(vcpu));

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		cached_root = &vcpu->arch.mmu->prev_roots[i];

		if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
					    eptp))
			vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
	}
}

static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vm_exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		vm_exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else {
		if (fault->error_code & PFERR_RSVD_MASK)
			vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
		else
			vm_exit_reason = EXIT_REASON_EPT_VIOLATION;

		/*
		 * Although the caller (kvm_inject_emulated_page_fault) would
		 * have already synced the faulting address in the shadow EPT
		 * tables for the current EPTP12, we also need to sync it for
		 * any other cached EPTP02s based on the same EP4TA, since the
		 * TLB associates mappings to the EP4TA rather than the full EPTP.
		 */
		nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
					   fault->address);
	}

	nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
{
	kvm_init_shadow_ept_mmu(vcpu,
				to_vmx(vcpu)->nested.msrs.ept_caps &
				VMX_EPT_EXECUTE_ONLY_BIT,
				nested_ept_ad_enabled(vcpu),
				nested_ept_get_eptp(vcpu));
}

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	nested_ept_new_eptp(vcpu);
	vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}


/*
 * KVM wants to inject page faults that it received into the guest. In a
 * nested guest, this function checks whether the fault should be injected
 * into L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~DR6_BT;
				payload ^= DR6_ACTIVE_LOW;
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}


static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
	    !to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
	    CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
		return -EINVAL;

	return 0;
}

/*
 * Check if an MSR write is intercepted by the vmcs01 (L01) MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}

/*
 * If an MSR is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_nested,
						 u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
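	 * The layout is: read-low at offset 0x000, read-high at 0x400,
	 * write-low at 0x800 and write-high at 0xc00, one bit per MSR.
	 * E.g. MSR_EFER (0xc0000080) becomes bit 0x80 ("msr &= 0x1fff") in
	 * the read-high and write-high halves.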
563 */ 564 if (msr <= 0x1fff) { 565 if (type & MSR_TYPE_R && 566 !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) 567 /* read-low */ 568 __clear_bit(msr, msr_bitmap_nested + 0x000 / f); 569 570 if (type & MSR_TYPE_W && 571 !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) 572 /* write-low */ 573 __clear_bit(msr, msr_bitmap_nested + 0x800 / f); 574 575 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 576 msr &= 0x1fff; 577 if (type & MSR_TYPE_R && 578 !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) 579 /* read-high */ 580 __clear_bit(msr, msr_bitmap_nested + 0x400 / f); 581 582 if (type & MSR_TYPE_W && 583 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) 584 /* write-high */ 585 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); 586 587 } 588 } 589 590 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) 591 { 592 int msr; 593 594 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 595 unsigned word = msr / BITS_PER_LONG; 596 597 msr_bitmap[word] = ~0; 598 msr_bitmap[word + (0x800 / sizeof(long))] = ~0; 599 } 600 } 601 602 /* 603 * Merge L0's and L1's MSR bitmap, return false to indicate that 604 * we do not use the hardware. 605 */ 606 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, 607 struct vmcs12 *vmcs12) 608 { 609 int msr; 610 unsigned long *msr_bitmap_l1; 611 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; 612 struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map; 613 614 /* Nothing to do if the MSR bitmap is not in use. */ 615 if (!cpu_has_vmx_msr_bitmap() || 616 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 617 return false; 618 619 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map)) 620 return false; 621 622 msr_bitmap_l1 = (unsigned long *)map->hva; 623 624 /* 625 * To keep the control flow simple, pay eight 8-byte writes (sixteen 626 * 4-byte writes on 32-bit systems) up front to enable intercepts for 627 * the x2APIC MSR range and selectively disable them below. 628 */ 629 enable_x2apic_msr_intercepts(msr_bitmap_l0); 630 631 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { 632 if (nested_cpu_has_apic_reg_virt(vmcs12)) { 633 /* 634 * L0 need not intercept reads for MSRs between 0x800 635 * and 0x8ff, it just lets the processor take the value 636 * from the virtual-APIC page; take those 256 bits 637 * directly from the L1 bitmap. 638 */ 639 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 640 unsigned word = msr / BITS_PER_LONG; 641 642 msr_bitmap_l0[word] = msr_bitmap_l1[word]; 643 } 644 } 645 646 nested_vmx_disable_intercept_for_msr( 647 msr_bitmap_l1, msr_bitmap_l0, 648 X2APIC_MSR(APIC_TASKPRI), 649 MSR_TYPE_R | MSR_TYPE_W); 650 651 if (nested_cpu_has_vid(vmcs12)) { 652 nested_vmx_disable_intercept_for_msr( 653 msr_bitmap_l1, msr_bitmap_l0, 654 X2APIC_MSR(APIC_EOI), 655 MSR_TYPE_W); 656 nested_vmx_disable_intercept_for_msr( 657 msr_bitmap_l1, msr_bitmap_l0, 658 X2APIC_MSR(APIC_SELF_IPI), 659 MSR_TYPE_W); 660 } 661 } 662 663 /* KVM unconditionally exposes the FS/GS base MSRs to L1. */ 664 #ifdef CONFIG_X86_64 665 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 666 MSR_FS_BASE, MSR_TYPE_RW); 667 668 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 669 MSR_GS_BASE, MSR_TYPE_RW); 670 671 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 672 MSR_KERNEL_GS_BASE, MSR_TYPE_RW); 673 #endif 674 675 /* 676 * Checking the L0->L1 bitmap is trying to verify two things: 677 * 678 * 1. L0 gave a permission to L1 to actually passthrough the MSR. 
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}

/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map->hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively disable them below.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
#ifdef CONFIG_X86_64
	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
#endif

	/*
	 * Checking the L0->L1 bitmap is trying to verify two things:
	 *
	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessarily merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_SPEC_CTRL,
					MSR_TYPE_R | MSR_TYPE_W);

	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_PRED_CMD,
					MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);

	return true;
}

static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct kvm_host_map map;
	struct vmcs12 *shadow;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
		return;

	memcpy(shadow, map.hva, VMCS12_SIZE);
	kvm_vcpu_unmap(vcpu, &map, false);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT.
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
		return -EINVAL;
	else
		return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	       nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	    (CC(!nested_cpu_has_vid(vmcs12)) ||
	     CC(!nested_exit_intr_ack_set(vcpu)) ||
	     CC((vmcs12->posted_intr_nv & 0xff00)) ||
	     CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	if (count == 0)
		return 0;

	if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
	    !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_load_count,
					   vmcs12->vm_exit_msr_load_addr)) ||
	    CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_store_count,
					   vmcs12->vm_exit_msr_store_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						      struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_entry_msr_load_count,
					   vmcs12->vm_entry_msr_load_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (CC(!nested_cpu_has_ept(vmcs12)) ||
	    CC(!page_address_valid(vcpu, vmcs12->pml_address)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							  struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
	    CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
		return -EINVAL;
	if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
	    CC(e->index == MSR_IA32_UCODE_REV))
		return -EINVAL;
	if (CC(e->reserved != 0))
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_FS_BASE) ||
	    CC(e->index == MSR_GS_BASE) ||
	    CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				       vmx->nested.msrs.misc_high);

	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}

/*
 * Load guest's/host's MSRs at nested entry/exit.
 * Return 0 for success, or the (1-based) entry index on failure.
 *
 * One of the failure modes for MSR load/store is when a list exceeds the
 * virtual hardware's capacity. To maintain compatibility with hardware as
 * much as possible, process all valid entries before failing rather than
 * prechecking for a capacity violation.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			goto fail;

		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		if (kvm_set_msr(vcpu, e.index, e.value)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	/* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
	return i + 1;
}

static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
					    u32 msr_index,
					    u64 *data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If the L0 hypervisor stored a more accurate value for the TSC that
	 * does not include the time taken for emulation of the L2->L1
	 * VM-exit in L0, use the more accurate value.
	 */
	if (msr_index == MSR_IA32_TSC) {
		int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
						    MSR_IA32_TSC);

		if (i >= 0) {
			u64 val = vmx->msr_autostore.guest.val[i].value;

			*data = kvm_read_l1_tsc(vcpu, val);
			return true;
		}
	}

	if (kvm_get_msr(vcpu, msr_index, data)) {
		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
				     msr_index);
		return false;
	}
	return true;
}

static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
				     struct vmx_msr_entry *e)
{
	if (kvm_vcpu_read_guest(vcpu,
				gpa + i * sizeof(*e),
				e, 2 * sizeof(u32))) {
		pr_debug_ratelimited(
			"%s cannot read MSR entry (%u, 0x%08llx)\n",
			__func__, i, gpa + i * sizeof(*e));
		return false;
	}
	if (nested_vmx_store_msr_check(vcpu, e)) {
		pr_debug_ratelimited(
			"%s check failed (%u, 0x%x, 0x%x)\n",
			__func__, i, e->index, e->reserved);
		return false;
	}
	return true;
}

static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u64 data;
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			return -EINVAL;

		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return -EINVAL;

		if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
			return -EINVAL;

		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
						offsetof(struct vmx_msr_entry, value),
					 &data, sizeof(data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, data);
			return -EINVAL;
		}
	}
	return 0;
}

static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 count = vmcs12->vm_exit_msr_store_count;
	u64 gpa = vmcs12->vm_exit_msr_store_addr;
	struct vmx_msr_entry e;
	u32 i;

	for (i = 0; i < count; i++) {
		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return false;

		if (e.index == msr_index)
			return true;
	}
	return false;
}

static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
					   u32 msr_index)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
	bool in_vmcs12_store_list;
	int msr_autostore_slot;
	bool in_autostore_list;
	int last;

	msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
	in_autostore_list = msr_autostore_slot >= 0;
	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);

	if (in_vmcs12_store_list && !in_autostore_list) {
		if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
			/*
			 * Emulated VMEntry does not fail here. Instead a less
			 * accurate value will be returned by
			 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
			 * instead of reading the value from the vmcs02 VMExit
			 * MSR-store area.
			 */
			pr_warn_ratelimited(
				"Not enough msr entries in msr_autostore. Can't add msr %x\n",
				msr_index);
			return;
		}
		last = autostore->nr++;
		autostore->val[last].index = msr_index;
	} else if (!in_vmcs12_store_list && in_autostore_list) {
		last = --autostore->nr;
		autostore->val[msr_autostore_slot] = autostore->val[last];
	}
}

/*
 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
 * @entry_failure_code.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_ept, bool reload_pdptrs,
			       enum vm_entry_failure_code *entry_failure_code)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
	}

	/*
	 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
	 * must not be dereferenced.
	 */
	if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
		*entry_failure_code = ENTRY_FAIL_PDPTE;
		return -EINVAL;
	}

	if (!nested_ept)
		kvm_mmu_new_pgd(vcpu, cr3);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	return 0;
}

/*
 * Returns true if KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated
 * by L1.
 *
 * If L0 uses EPT, L1 and L2 run with different EPTP because
 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
 * are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return enable_ept ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
					    struct vmcs12 *vmcs12,
					    bool is_vmenter)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
	 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
	 * full TLB flush from the guest's perspective. This is required even
	 * if VPID is disabled in the host as KVM may need to synchronize the
	 * MMU in response to the guest TLB flush.
	 *
	 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
	 * EPT is a special snowflake, as guest-physical mappings aren't
	 * flushed on VPID invalidations, including VM-Enter or VM-Exit with
	 * VPID disabled. As a result, KVM _never_ needs to sync nEPT
	 * entries on VM-Enter because L1 can't rely on VM-Enter to flush
	 * those mappings.
	 */
	if (!nested_cpu_has_vpid(vmcs12)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/* L2 should never have a VPID if VPID is disabled. */
	WARN_ON(!enable_vpid);

	/*
	 * If VPID is enabled and used by vmcs12, but L2 does not have a unique
	 * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
	 * a VPID for L2, flush the current context as the effective ASID is
	 * common to both L1 and L2.
	 *
	 * Defer the flush so that it runs after vmcs02.EPTP has been set by
	 * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
	 * redundant flushes further down the nested pipeline.
	 *
	 * If a TLB flush isn't required due to any of the above, and vpid12 is
	 * changing then the new "virtual" VPID (vpid12) will reuse the same
	 * "real" VPID (vpid02), and so needs to be flushed. There's no direct
	 * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
	 * all nested vCPUs. Remember, a flush on VM-Enter does not invalidate
	 * guest-physical mappings, so there is no need to sync the nEPT MMU.
	 */
	if (!nested_has_guest_tlb_tag(vcpu)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	} else if (is_vmenter &&
		   vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
		vmx->nested.last_vpid = vmcs12->virtual_processor_id;
		vpid_sync_context(nested_get_vpid02(vcpu));
	}
}

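/*
 * Returns true if, within @mask, every bit that is set in @subset is also
 * set in @superset.
 */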
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}

static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}

static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicate bits that "must-be-1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}

/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	case MSR_IA32_VMX_VMFUNC:
		if (data & ~vmx->nested.msrs.vmfunc_controls)
			return -EINVAL;
		vmx->nested.msrs.vmfunc_controls = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}

/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
 * been modified by the L1 guest. Note, "writable" in this context means
 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
 * VM-exit information fields (which are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS").
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i;

	if (WARN_ON(!shadow_vmcs))
		return;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (i = 0; i < max_shadow_read_write_fields; i++) {
		field = shadow_read_write_fields[i];
		val = __vmcs_readl(field.encoding);
		vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}

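/*
 * Copy the shadowed (both RW and RO) vmcs12 fields into the shadow VMCS so
 * that L1's VMREAD/VMWRITE of those fields can be satisfied without a
 * VM-Exit.
 */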
HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) { 1643 vmcs12->host_ia32_pat = evmcs->host_ia32_pat; 1644 vmcs12->host_ia32_efer = evmcs->host_ia32_efer; 1645 vmcs12->host_cr0 = evmcs->host_cr0; 1646 vmcs12->host_cr3 = evmcs->host_cr3; 1647 vmcs12->host_cr4 = evmcs->host_cr4; 1648 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp; 1649 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip; 1650 vmcs12->host_rip = evmcs->host_rip; 1651 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs; 1652 vmcs12->host_es_selector = evmcs->host_es_selector; 1653 vmcs12->host_cs_selector = evmcs->host_cs_selector; 1654 vmcs12->host_ss_selector = evmcs->host_ss_selector; 1655 vmcs12->host_ds_selector = evmcs->host_ds_selector; 1656 vmcs12->host_fs_selector = evmcs->host_fs_selector; 1657 vmcs12->host_gs_selector = evmcs->host_gs_selector; 1658 vmcs12->host_tr_selector = evmcs->host_tr_selector; 1659 } 1660 1661 if (unlikely(!(hv_clean_fields & 1662 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) { 1663 vmcs12->pin_based_vm_exec_control = 1664 evmcs->pin_based_vm_exec_control; 1665 vmcs12->vm_exit_controls = evmcs->vm_exit_controls; 1666 vmcs12->secondary_vm_exec_control = 1667 evmcs->secondary_vm_exec_control; 1668 } 1669 1670 if (unlikely(!(hv_clean_fields & 1671 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) { 1672 vmcs12->io_bitmap_a = evmcs->io_bitmap_a; 1673 vmcs12->io_bitmap_b = evmcs->io_bitmap_b; 1674 } 1675 1676 if (unlikely(!(hv_clean_fields & 1677 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) { 1678 vmcs12->msr_bitmap = evmcs->msr_bitmap; 1679 } 1680 1681 if (unlikely(!(hv_clean_fields & 1682 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) { 1683 vmcs12->guest_es_base = evmcs->guest_es_base; 1684 vmcs12->guest_cs_base = evmcs->guest_cs_base; 1685 vmcs12->guest_ss_base = evmcs->guest_ss_base; 1686 vmcs12->guest_ds_base = evmcs->guest_ds_base; 1687 vmcs12->guest_fs_base = evmcs->guest_fs_base; 1688 vmcs12->guest_gs_base = evmcs->guest_gs_base; 1689 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base; 1690 vmcs12->guest_tr_base = evmcs->guest_tr_base; 1691 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base; 1692 vmcs12->guest_idtr_base = evmcs->guest_idtr_base; 1693 vmcs12->guest_es_limit = evmcs->guest_es_limit; 1694 vmcs12->guest_cs_limit = evmcs->guest_cs_limit; 1695 vmcs12->guest_ss_limit = evmcs->guest_ss_limit; 1696 vmcs12->guest_ds_limit = evmcs->guest_ds_limit; 1697 vmcs12->guest_fs_limit = evmcs->guest_fs_limit; 1698 vmcs12->guest_gs_limit = evmcs->guest_gs_limit; 1699 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit; 1700 vmcs12->guest_tr_limit = evmcs->guest_tr_limit; 1701 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit; 1702 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit; 1703 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes; 1704 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; 1705 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; 1706 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; 1707 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; 1708 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; 1709 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; 1710 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; 1711 vmcs12->guest_es_selector = evmcs->guest_es_selector; 1712 vmcs12->guest_cs_selector = evmcs->guest_cs_selector; 1713 vmcs12->guest_ss_selector = evmcs->guest_ss_selector; 1714 vmcs12->guest_ds_selector = evmcs->guest_ds_selector; 1715 vmcs12->guest_fs_selector = evmcs->guest_fs_selector; 1716 vmcs12->guest_gs_selector = 
evmcs->guest_gs_selector; 1717 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; 1718 vmcs12->guest_tr_selector = evmcs->guest_tr_selector; 1719 } 1720 1721 if (unlikely(!(hv_clean_fields & 1722 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) { 1723 vmcs12->tsc_offset = evmcs->tsc_offset; 1724 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr; 1725 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; 1726 } 1727 1728 if (unlikely(!(hv_clean_fields & 1729 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) { 1730 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; 1731 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; 1732 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; 1733 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; 1734 vmcs12->guest_cr0 = evmcs->guest_cr0; 1735 vmcs12->guest_cr3 = evmcs->guest_cr3; 1736 vmcs12->guest_cr4 = evmcs->guest_cr4; 1737 vmcs12->guest_dr7 = evmcs->guest_dr7; 1738 } 1739 1740 if (unlikely(!(hv_clean_fields & 1741 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) { 1742 vmcs12->host_fs_base = evmcs->host_fs_base; 1743 vmcs12->host_gs_base = evmcs->host_gs_base; 1744 vmcs12->host_tr_base = evmcs->host_tr_base; 1745 vmcs12->host_gdtr_base = evmcs->host_gdtr_base; 1746 vmcs12->host_idtr_base = evmcs->host_idtr_base; 1747 vmcs12->host_rsp = evmcs->host_rsp; 1748 } 1749 1750 if (unlikely(!(hv_clean_fields & 1751 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) { 1752 vmcs12->ept_pointer = evmcs->ept_pointer; 1753 vmcs12->virtual_processor_id = evmcs->virtual_processor_id; 1754 } 1755 1756 if (unlikely(!(hv_clean_fields & 1757 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) { 1758 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; 1759 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; 1760 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; 1761 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; 1762 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; 1763 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; 1764 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; 1765 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; 1766 vmcs12->guest_pending_dbg_exceptions = 1767 evmcs->guest_pending_dbg_exceptions; 1768 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; 1769 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; 1770 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; 1771 vmcs12->guest_activity_state = evmcs->guest_activity_state; 1772 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; 1773 } 1774 1775 /* 1776 * Not used? 
1777 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; 1778 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; 1779 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; 1780 * vmcs12->page_fault_error_code_mask = 1781 * evmcs->page_fault_error_code_mask; 1782 * vmcs12->page_fault_error_code_match = 1783 * evmcs->page_fault_error_code_match; 1784 * vmcs12->cr3_target_count = evmcs->cr3_target_count; 1785 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; 1786 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; 1787 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; 1788 */ 1789 1790 /* 1791 * Read only fields: 1792 * vmcs12->guest_physical_address = evmcs->guest_physical_address; 1793 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; 1794 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; 1795 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; 1796 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; 1797 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; 1798 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; 1799 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; 1800 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; 1801 * vmcs12->exit_qualification = evmcs->exit_qualification; 1802 * vmcs12->guest_linear_address = evmcs->guest_linear_address; 1803 * 1804 * Not present in struct vmcs12: 1805 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; 1806 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; 1807 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; 1808 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; 1809 */ 1810 1811 return; 1812 } 1813 1814 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) 1815 { 1816 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1817 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 1818 1819 /* 1820 * Should not be changed by KVM: 1821 * 1822 * evmcs->host_es_selector = vmcs12->host_es_selector; 1823 * evmcs->host_cs_selector = vmcs12->host_cs_selector; 1824 * evmcs->host_ss_selector = vmcs12->host_ss_selector; 1825 * evmcs->host_ds_selector = vmcs12->host_ds_selector; 1826 * evmcs->host_fs_selector = vmcs12->host_fs_selector; 1827 * evmcs->host_gs_selector = vmcs12->host_gs_selector; 1828 * evmcs->host_tr_selector = vmcs12->host_tr_selector; 1829 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; 1830 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; 1831 * evmcs->host_cr0 = vmcs12->host_cr0; 1832 * evmcs->host_cr3 = vmcs12->host_cr3; 1833 * evmcs->host_cr4 = vmcs12->host_cr4; 1834 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; 1835 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; 1836 * evmcs->host_rip = vmcs12->host_rip; 1837 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; 1838 * evmcs->host_fs_base = vmcs12->host_fs_base; 1839 * evmcs->host_gs_base = vmcs12->host_gs_base; 1840 * evmcs->host_tr_base = vmcs12->host_tr_base; 1841 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; 1842 * evmcs->host_idtr_base = vmcs12->host_idtr_base; 1843 * evmcs->host_rsp = vmcs12->host_rsp; 1844 * sync_vmcs02_to_vmcs12() doesn't read these: 1845 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; 1846 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; 1847 * evmcs->msr_bitmap = vmcs12->msr_bitmap; 1848 * evmcs->ept_pointer = vmcs12->ept_pointer; 1849 * 
evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; 1850 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; 1851 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; 1852 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; 1853 * evmcs->tpr_threshold = vmcs12->tpr_threshold; 1854 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; 1855 * evmcs->exception_bitmap = vmcs12->exception_bitmap; 1856 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; 1857 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; 1858 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; 1859 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; 1860 * evmcs->page_fault_error_code_mask = 1861 * vmcs12->page_fault_error_code_mask; 1862 * evmcs->page_fault_error_code_match = 1863 * vmcs12->page_fault_error_code_match; 1864 * evmcs->cr3_target_count = vmcs12->cr3_target_count; 1865 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; 1866 * evmcs->tsc_offset = vmcs12->tsc_offset; 1867 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; 1868 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; 1869 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; 1870 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; 1871 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; 1872 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; 1873 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; 1874 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; 1875 * 1876 * Not present in struct vmcs12: 1877 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; 1878 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; 1879 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; 1880 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; 1881 */ 1882 1883 evmcs->guest_es_selector = vmcs12->guest_es_selector; 1884 evmcs->guest_cs_selector = vmcs12->guest_cs_selector; 1885 evmcs->guest_ss_selector = vmcs12->guest_ss_selector; 1886 evmcs->guest_ds_selector = vmcs12->guest_ds_selector; 1887 evmcs->guest_fs_selector = vmcs12->guest_fs_selector; 1888 evmcs->guest_gs_selector = vmcs12->guest_gs_selector; 1889 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; 1890 evmcs->guest_tr_selector = vmcs12->guest_tr_selector; 1891 1892 evmcs->guest_es_limit = vmcs12->guest_es_limit; 1893 evmcs->guest_cs_limit = vmcs12->guest_cs_limit; 1894 evmcs->guest_ss_limit = vmcs12->guest_ss_limit; 1895 evmcs->guest_ds_limit = vmcs12->guest_ds_limit; 1896 evmcs->guest_fs_limit = vmcs12->guest_fs_limit; 1897 evmcs->guest_gs_limit = vmcs12->guest_gs_limit; 1898 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; 1899 evmcs->guest_tr_limit = vmcs12->guest_tr_limit; 1900 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; 1901 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; 1902 1903 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; 1904 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; 1905 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; 1906 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; 1907 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; 1908 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; 1909 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; 1910 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; 1911 1912 evmcs->guest_es_base = vmcs12->guest_es_base; 1913 evmcs->guest_cs_base = vmcs12->guest_cs_base; 
1914 evmcs->guest_ss_base = vmcs12->guest_ss_base; 1915 evmcs->guest_ds_base = vmcs12->guest_ds_base; 1916 evmcs->guest_fs_base = vmcs12->guest_fs_base; 1917 evmcs->guest_gs_base = vmcs12->guest_gs_base; 1918 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; 1919 evmcs->guest_tr_base = vmcs12->guest_tr_base; 1920 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; 1921 evmcs->guest_idtr_base = vmcs12->guest_idtr_base; 1922 1923 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; 1924 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; 1925 1926 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; 1927 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; 1928 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; 1929 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; 1930 1931 evmcs->guest_pending_dbg_exceptions = 1932 vmcs12->guest_pending_dbg_exceptions; 1933 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; 1934 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; 1935 1936 evmcs->guest_activity_state = vmcs12->guest_activity_state; 1937 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; 1938 1939 evmcs->guest_cr0 = vmcs12->guest_cr0; 1940 evmcs->guest_cr3 = vmcs12->guest_cr3; 1941 evmcs->guest_cr4 = vmcs12->guest_cr4; 1942 evmcs->guest_dr7 = vmcs12->guest_dr7; 1943 1944 evmcs->guest_physical_address = vmcs12->guest_physical_address; 1945 1946 evmcs->vm_instruction_error = vmcs12->vm_instruction_error; 1947 evmcs->vm_exit_reason = vmcs12->vm_exit_reason; 1948 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; 1949 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; 1950 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; 1951 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; 1952 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; 1953 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; 1954 1955 evmcs->exit_qualification = vmcs12->exit_qualification; 1956 1957 evmcs->guest_linear_address = vmcs12->guest_linear_address; 1958 evmcs->guest_rsp = vmcs12->guest_rsp; 1959 evmcs->guest_rflags = vmcs12->guest_rflags; 1960 1961 evmcs->guest_interruptibility_info = 1962 vmcs12->guest_interruptibility_info; 1963 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; 1964 evmcs->vm_entry_controls = vmcs12->vm_entry_controls; 1965 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; 1966 evmcs->vm_entry_exception_error_code = 1967 vmcs12->vm_entry_exception_error_code; 1968 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; 1969 1970 evmcs->guest_rip = vmcs12->guest_rip; 1971 1972 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; 1973 1974 return; 1975 } 1976 1977 /* 1978 * This is an equivalent of the nested hypervisor executing the vmptrld 1979 * instruction. 
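 * For an enlightened VMCS there is no explicit VMPTRLD operand; instead,
 * nested_enlightened_vmentry() fetches the current eVMCS GPA from the
 * Hyper-V VP assist page.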
1980 */ 1981 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld( 1982 struct kvm_vcpu *vcpu, bool from_launch) 1983 { 1984 struct vcpu_vmx *vmx = to_vmx(vcpu); 1985 bool evmcs_gpa_changed = false; 1986 u64 evmcs_gpa; 1987 1988 if (likely(!vmx->nested.enlightened_vmcs_enabled)) 1989 return EVMPTRLD_DISABLED; 1990 1991 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) { 1992 nested_release_evmcs(vcpu); 1993 return EVMPTRLD_DISABLED; 1994 } 1995 1996 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { 1997 vmx->nested.current_vmptr = -1ull; 1998 1999 nested_release_evmcs(vcpu); 2000 2001 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa), 2002 &vmx->nested.hv_evmcs_map)) 2003 return EVMPTRLD_ERROR; 2004 2005 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva; 2006 2007 /* 2008 * Currently, KVM only supports eVMCS version 1 2009 * (== KVM_EVMCS_VERSION) and thus we expect guest to set this 2010 * value to first u32 field of eVMCS which should specify eVMCS 2011 * VersionNumber. 2012 * 2013 * Guest should be aware of supported eVMCS versions by host by 2014 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is 2015 * expected to set this CPUID leaf according to the value 2016 * returned in vmcs_version from nested_enable_evmcs(). 2017 * 2018 * However, it turns out that Microsoft Hyper-V fails to comply 2019 * to their own invented interface: When Hyper-V use eVMCS, it 2020 * just sets first u32 field of eVMCS to revision_id specified 2021 * in MSR_IA32_VMX_BASIC. Instead of used eVMCS version number 2022 * which is one of the supported versions specified in 2023 * CPUID.0x4000000A.EAX[0:15]. 2024 * 2025 * To overcome Hyper-V bug, we accept here either a supported 2026 * eVMCS version or VMCS12 revision_id as valid values for first 2027 * u32 field of eVMCS. 2028 */ 2029 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && 2030 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { 2031 nested_release_evmcs(vcpu); 2032 return EVMPTRLD_VMFAIL; 2033 } 2034 2035 vmx->nested.hv_evmcs_vmptr = evmcs_gpa; 2036 2037 evmcs_gpa_changed = true; 2038 /* 2039 * Unlike normal vmcs12, enlightened vmcs12 is not fully 2040 * reloaded from guest's memory (read only fields, fields not 2041 * present in struct hv_enlightened_vmcs, ...). Make sure there 2042 * are no leftovers. 2043 */ 2044 if (from_launch) { 2045 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2046 memset(vmcs12, 0, sizeof(*vmcs12)); 2047 vmcs12->hdr.revision_id = VMCS12_REVISION; 2048 } 2049 2050 } 2051 2052 /* 2053 * Clean fields data can't be used on VMLAUNCH and when we switch 2054 * between different L2 guests as KVM keeps a single VMCS12 per L1. 
2055 */ 2056 if (from_launch || evmcs_gpa_changed) 2057 vmx->nested.hv_evmcs->hv_clean_fields &= 2058 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2059 2060 return EVMPTRLD_SUCCEEDED; 2061 } 2062 2063 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu) 2064 { 2065 struct vcpu_vmx *vmx = to_vmx(vcpu); 2066 2067 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 2068 copy_vmcs12_to_enlightened(vmx); 2069 else 2070 copy_vmcs12_to_shadow(vmx); 2071 2072 vmx->nested.need_vmcs12_to_shadow_sync = false; 2073 } 2074 2075 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) 2076 { 2077 struct vcpu_vmx *vmx = 2078 container_of(timer, struct vcpu_vmx, nested.preemption_timer); 2079 2080 vmx->nested.preemption_timer_expired = true; 2081 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); 2082 kvm_vcpu_kick(&vmx->vcpu); 2083 2084 return HRTIMER_NORESTART; 2085 } 2086 2087 static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu) 2088 { 2089 struct vcpu_vmx *vmx = to_vmx(vcpu); 2090 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2091 2092 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >> 2093 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2094 2095 if (!vmx->nested.has_preemption_timer_deadline) { 2096 vmx->nested.preemption_timer_deadline = 2097 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc; 2098 vmx->nested.has_preemption_timer_deadline = true; 2099 } 2100 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; 2101 } 2102 2103 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu, 2104 u64 preemption_timeout) 2105 { 2106 struct vcpu_vmx *vmx = to_vmx(vcpu); 2107 2108 /* 2109 * A timer value of zero is architecturally guaranteed to cause 2110 * a VMExit prior to executing any instructions in the guest. 2111 */ 2112 if (preemption_timeout == 0) { 2113 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); 2114 return; 2115 } 2116 2117 if (vcpu->arch.virtual_tsc_khz == 0) 2118 return; 2119 2120 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2121 preemption_timeout *= 1000000; 2122 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); 2123 hrtimer_start(&vmx->nested.preemption_timer, 2124 ktime_add_ns(ktime_get(), preemption_timeout), 2125 HRTIMER_MODE_ABS_PINNED); 2126 } 2127 2128 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2129 { 2130 if (vmx->nested.nested_run_pending && 2131 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) 2132 return vmcs12->guest_ia32_efer; 2133 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) 2134 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); 2135 else 2136 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); 2137 } 2138 2139 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) 2140 { 2141 /* 2142 * If vmcs02 hasn't been initialized, set the constant vmcs02 state 2143 * according to L0's settings (vmcs12 is irrelevant here). Host 2144 * fields that come from L0 and are not constant, e.g. HOST_CR3, 2145 * will be set as needed prior to VMLAUNCH/VMRESUME. 2146 */ 2147 if (vmx->nested.vmcs02_initialized) 2148 return; 2149 vmx->nested.vmcs02_initialized = true; 2150 2151 /* 2152 * We don't care what the EPTP value is we just need to guarantee 2153 * it's valid so we don't get a false positive when doing early 2154 * consistency checks. 2155 */ 2156 if (enable_ept && nested_early_check) 2157 vmcs_write64(EPT_POINTER, 2158 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); 2159 2160 /* All VMFUNCs are currently emulated through L0 vmexits. 
*/ 2161 if (cpu_has_vmx_vmfunc()) 2162 vmcs_write64(VM_FUNCTION_CONTROL, 0); 2163 2164 if (cpu_has_vmx_posted_intr()) 2165 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); 2166 2167 if (cpu_has_vmx_msr_bitmap()) 2168 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); 2169 2170 /* 2171 * PML is emulated for L2, but never enabled in hardware as the MMU 2172 * handles A/D emulation. Disabling PML for L2 also avoids having to 2173 * deal with filtering out L2 GPAs from the buffer. 2174 */ 2175 if (enable_pml) { 2176 vmcs_write64(PML_ADDRESS, 0); 2177 vmcs_write16(GUEST_PML_INDEX, -1); 2178 } 2179 2180 if (cpu_has_vmx_encls_vmexit()) 2181 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); 2182 2183 /* 2184 * Set the MSR load/store lists to match L0's settings. Only the 2185 * addresses are constant (for vmcs02), the counts can change based 2186 * on L2's behavior, e.g. switching to/from long mode. 2187 */ 2188 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); 2189 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 2190 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 2191 2192 vmx_set_constant_host_state(vmx); 2193 } 2194 2195 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, 2196 struct vmcs12 *vmcs12) 2197 { 2198 prepare_vmcs02_constant_state(vmx); 2199 2200 vmcs_write64(VMCS_LINK_POINTER, -1ull); 2201 2202 if (enable_vpid) { 2203 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) 2204 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); 2205 else 2206 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 2207 } 2208 } 2209 2210 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2211 { 2212 u32 exec_control; 2213 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); 2214 2215 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 2216 prepare_vmcs02_early_rare(vmx, vmcs12); 2217 2218 /* 2219 * PIN CONTROLS 2220 */ 2221 exec_control = vmx_pin_based_exec_ctrl(vmx); 2222 exec_control |= (vmcs12->pin_based_vm_exec_control & 2223 ~PIN_BASED_VMX_PREEMPTION_TIMER); 2224 2225 /* Posted interrupts setting is only taken from vmcs12. */ 2226 if (nested_cpu_has_posted_intr(vmcs12)) { 2227 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; 2228 vmx->nested.pi_pending = false; 2229 } else { 2230 exec_control &= ~PIN_BASED_POSTED_INTR; 2231 } 2232 pin_controls_set(vmx, exec_control); 2233 2234 /* 2235 * EXEC CONTROLS 2236 */ 2237 exec_control = vmx_exec_control(vmx); /* L0's desires */ 2238 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING; 2239 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING; 2240 exec_control &= ~CPU_BASED_TPR_SHADOW; 2241 exec_control |= vmcs12->cpu_based_vm_exec_control; 2242 2243 vmx->nested.l1_tpr_threshold = -1; 2244 if (exec_control & CPU_BASED_TPR_SHADOW) 2245 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 2246 #ifdef CONFIG_X86_64 2247 else 2248 exec_control |= CPU_BASED_CR8_LOAD_EXITING | 2249 CPU_BASED_CR8_STORE_EXITING; 2250 #endif 2251 2252 /* 2253 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed 2254 * for I/O port accesses. 2255 */ 2256 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 2257 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 2258 2259 /* 2260 * This bit will be computed in nested_get_vmcs12_pages, because 2261 * we do not have access to L1's MSR bitmap yet. For now, keep 2262 * the same bit as before, hoping to avoid multiple VMWRITEs that 2263 * only set/clear this bit. 
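	 * The final value is chosen in nested_get_vmcs12_pages(): the bit is
	 * set or cleared there depending on whether
	 * nested_vmx_prepare_msr_bitmap() succeeds.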
2264 */ 2265 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; 2266 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; 2267 2268 exec_controls_set(vmx, exec_control); 2269 2270 /* 2271 * SECONDARY EXEC CONTROLS 2272 */ 2273 if (cpu_has_secondary_exec_ctrls()) { 2274 exec_control = vmx->secondary_exec_control; 2275 2276 /* Take the following fields only from vmcs12 */ 2277 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2278 SECONDARY_EXEC_ENABLE_INVPCID | 2279 SECONDARY_EXEC_ENABLE_RDTSCP | 2280 SECONDARY_EXEC_XSAVES | 2281 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2282 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2283 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2284 SECONDARY_EXEC_ENABLE_VMFUNC | 2285 SECONDARY_EXEC_TSC_SCALING); 2286 if (nested_cpu_has(vmcs12, 2287 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) 2288 exec_control |= vmcs12->secondary_vm_exec_control; 2289 2290 /* PML is emulated and never enabled in hardware for L2. */ 2291 exec_control &= ~SECONDARY_EXEC_ENABLE_PML; 2292 2293 /* VMCS shadowing for L2 is emulated for now */ 2294 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 2295 2296 /* 2297 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4() 2298 * will not have to rewrite the controls just for this bit. 2299 */ 2300 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() && 2301 (vmcs12->guest_cr4 & X86_CR4_UMIP)) 2302 exec_control |= SECONDARY_EXEC_DESC; 2303 2304 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 2305 vmcs_write16(GUEST_INTR_STATUS, 2306 vmcs12->guest_intr_status); 2307 2308 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) 2309 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 2310 2311 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING) 2312 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12); 2313 2314 secondary_exec_controls_set(vmx, exec_control); 2315 } 2316 2317 /* 2318 * ENTRY CONTROLS 2319 * 2320 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE 2321 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate 2322 * on the related bits (if supported by the CPU) in the hope that 2323 * we can avoid VMWrites during vmx_set_efer(). 2324 */ 2325 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) & 2326 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; 2327 if (cpu_has_load_ia32_efer()) { 2328 if (guest_efer & EFER_LMA) 2329 exec_control |= VM_ENTRY_IA32E_MODE; 2330 if (guest_efer != host_efer) 2331 exec_control |= VM_ENTRY_LOAD_IA32_EFER; 2332 } 2333 vm_entry_controls_set(vmx, exec_control); 2334 2335 /* 2336 * EXIT CONTROLS 2337 * 2338 * L2->L1 exit controls are emulated - the hardware exit is to L0 so 2339 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER 2340 * bits may be modified by vmx_set_efer() in prepare_vmcs02(). 
2341 */ 2342 exec_control = vmx_vmexit_ctrl(); 2343 if (cpu_has_load_ia32_efer() && guest_efer != host_efer) 2344 exec_control |= VM_EXIT_LOAD_IA32_EFER; 2345 vm_exit_controls_set(vmx, exec_control); 2346 2347 /* 2348 * Interrupt/Exception Fields 2349 */ 2350 if (vmx->nested.nested_run_pending) { 2351 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2352 vmcs12->vm_entry_intr_info_field); 2353 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 2354 vmcs12->vm_entry_exception_error_code); 2355 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2356 vmcs12->vm_entry_instruction_len); 2357 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 2358 vmcs12->guest_interruptibility_info); 2359 vmx->loaded_vmcs->nmi_known_unmasked = 2360 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); 2361 } else { 2362 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 2363 } 2364 } 2365 2366 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2367 { 2368 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2369 2370 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2371 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2372 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 2373 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 2374 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); 2375 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); 2376 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); 2377 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); 2378 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); 2379 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); 2380 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); 2381 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); 2382 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); 2383 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); 2384 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); 2385 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); 2386 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); 2387 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); 2388 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); 2389 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); 2390 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); 2391 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); 2392 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); 2393 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); 2394 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); 2395 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); 2396 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); 2397 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); 2398 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); 2399 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); 2400 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); 2401 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); 2402 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); 2403 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); 2404 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); 2405 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); 2406 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); 2407 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); 2408 2409 vmx->segment_cache.bitmask = 0; 2410 } 2411 2412 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2413 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { 2414 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 2415 
		vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
			    vmcs12->guest_pending_dbg_exceptions);
		vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
		vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);

		/*
		 * L1 may access L2's PDPTRs, so save them to construct
		 * vmcs12.
		 */
		if (enable_ept) {
			vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
			vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
			vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
			vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
		}

		if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
		    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
			vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
	}

	if (nested_cpu_has_xsaves(vmcs12))
		vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);

	/*
	 * Whether page-faults are trapped is determined by a combination of
	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
	 * doesn't care about page faults then we should set all of these to
	 * L1's desires. However, if L0 does care about (some) page faults, it
	 * is not easy (if at all possible?) to merge L0's and L1's desires,
	 * so we simply ask to exit on each and every L2 page fault. This is
	 * done by setting MASK=MATCH=0 and (see below) EB.PF=1.
	 * Note that below we don't need special code to set EB.PF beyond the
	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
	 */
	if (vmx_need_pf_intercept(&vmx->vcpu)) {
		/*
		 * TODO: if both L0 and L1 need the same MASK and MATCH,
		 * go ahead and use it?
		 */
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	} else {
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
	}

	if (cpu_has_vmx_apicv()) {
		vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
		vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
		vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
		vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
	}

	/*
	 * Make sure the msr_autostore list is up to date before we set the
	 * count in the vmcs02.
	 */
	prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);

	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

	set_cr4_guest_host_mask(vmx);
}

/*
 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
 * guest in a way that is appropriate to both L1's requests and our own needs.
 * In addition to modifying the active vmcs (which is vmcs02), this function
 * also has necessary side effects, such as setting various vcpu->arch fields.
 * Returns 0 on success and -EINVAL on failure. The invalid-state exit
 * qualification code is assigned to *entry_failure_code on failure.
 */
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			  bool from_vmentry,
			  enum vm_entry_failure_code *entry_failure_code)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool load_guest_pdptrs_vmcs12 = false;

	if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
		prepare_vmcs02_rare(vmx, vmcs12);
		vmx->nested.dirty_vmcs12 = false;

		load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
			!(vmx->nested.hv_evmcs->hv_clean_fields &
			  HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
	}

	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
		kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
	} else {
		kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
	}
	if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
		vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
	vmx_set_rflags(vcpu, vmcs12->guest_rflags);

	/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
	 * bitwise-or of what L1 wants to trap for L2, and what we want to
	 * trap. Note that CR0.TS also needs updating - we do this later.
	 */
	vmx_update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
		vcpu->arch.pat = vmcs12->guest_ia32_pat;
	} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
	}

	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
			vcpu->arch.l1_tsc_offset,
			vmx_get_l2_tsc_offset(vcpu),
			vmx_get_l2_tsc_multiplier(vcpu));

	vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
			vcpu->arch.l1_tsc_scaling_ratio,
			vmx_get_l2_tsc_multiplier(vcpu));

	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
	if (kvm_has_tsc_control)
		vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);

	nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);

	if (nested_cpu_has_ept(vmcs12))
		nested_ept_init_mmu_context(vcpu);

	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying the
	 * bits that we consider mandatory-enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; it's not enough to take
	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may have
	 * more bits than L1 expected.
	 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
	/* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/*
	 * Guest state is invalid and unrestricted guest is disabled,
	 * which means L1 attempted VMEntry to L2 with invalid state.
	 * Fail the VMEntry.
2580 */ 2581 if (CC(!vmx_guest_state_valid(vcpu))) { 2582 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2583 return -EINVAL; 2584 } 2585 2586 /* Shadow page tables on either EPT or shadow page tables. */ 2587 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), 2588 from_vmentry, entry_failure_code)) 2589 return -EINVAL; 2590 2591 /* 2592 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12 2593 * on nested VM-Exit, which can occur without actually running L2 and 2594 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with 2595 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the 2596 * transition to HLT instead of running L2. 2597 */ 2598 if (enable_ept) 2599 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3); 2600 2601 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */ 2602 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) && 2603 is_pae_paging(vcpu)) { 2604 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2605 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2606 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2607 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2608 } 2609 2610 if (!enable_ept) 2611 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; 2612 2613 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 2614 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 2615 vmcs12->guest_ia32_perf_global_ctrl))) 2616 return -EINVAL; 2617 2618 kvm_rsp_write(vcpu, vmcs12->guest_rsp); 2619 kvm_rip_write(vcpu, vmcs12->guest_rip); 2620 2621 /* 2622 * It was observed that genuine Hyper-V running in L1 doesn't reset 2623 * 'hv_clean_fields' by itself, it only sets the corresponding dirty 2624 * bits when it changes a field in eVMCS. Mark all fields as clean 2625 * here. 2626 */ 2627 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 2628 vmx->nested.hv_evmcs->hv_clean_fields |= 2629 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2630 2631 return 0; 2632 } 2633 2634 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) 2635 { 2636 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) && 2637 nested_cpu_has_virtual_nmis(vmcs12))) 2638 return -EINVAL; 2639 2640 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) && 2641 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING))) 2642 return -EINVAL; 2643 2644 return 0; 2645 } 2646 2647 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) 2648 { 2649 struct vcpu_vmx *vmx = to_vmx(vcpu); 2650 2651 /* Check for memory type validity */ 2652 switch (new_eptp & VMX_EPTP_MT_MASK) { 2653 case VMX_EPTP_MT_UC: 2654 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) 2655 return false; 2656 break; 2657 case VMX_EPTP_MT_WB: 2658 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) 2659 return false; 2660 break; 2661 default: 2662 return false; 2663 } 2664 2665 /* Page-walk levels validity. 
*/ 2666 switch (new_eptp & VMX_EPTP_PWL_MASK) { 2667 case VMX_EPTP_PWL_5: 2668 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) 2669 return false; 2670 break; 2671 case VMX_EPTP_PWL_4: 2672 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) 2673 return false; 2674 break; 2675 default: 2676 return false; 2677 } 2678 2679 /* Reserved bits should not be set */ 2680 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) 2681 return false; 2682 2683 /* AD, if set, should be supported */ 2684 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) { 2685 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) 2686 return false; 2687 } 2688 2689 return true; 2690 } 2691 2692 /* 2693 * Checks related to VM-Execution Control Fields 2694 */ 2695 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, 2696 struct vmcs12 *vmcs12) 2697 { 2698 struct vcpu_vmx *vmx = to_vmx(vcpu); 2699 2700 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control, 2701 vmx->nested.msrs.pinbased_ctls_low, 2702 vmx->nested.msrs.pinbased_ctls_high)) || 2703 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, 2704 vmx->nested.msrs.procbased_ctls_low, 2705 vmx->nested.msrs.procbased_ctls_high))) 2706 return -EINVAL; 2707 2708 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && 2709 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control, 2710 vmx->nested.msrs.secondary_ctls_low, 2711 vmx->nested.msrs.secondary_ctls_high))) 2712 return -EINVAL; 2713 2714 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) || 2715 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || 2716 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || 2717 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || 2718 nested_vmx_check_apic_access_controls(vcpu, vmcs12) || 2719 nested_vmx_check_apicv_controls(vcpu, vmcs12) || 2720 nested_vmx_check_nmi_controls(vmcs12) || 2721 nested_vmx_check_pml_controls(vcpu, vmcs12) || 2722 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || 2723 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || 2724 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || 2725 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2726 return -EINVAL; 2727 2728 if (!nested_cpu_has_preemption_timer(vmcs12) && 2729 nested_cpu_has_save_preemption_timer(vmcs12)) 2730 return -EINVAL; 2731 2732 if (nested_cpu_has_ept(vmcs12) && 2733 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer))) 2734 return -EINVAL; 2735 2736 if (nested_cpu_has_vmfunc(vmcs12)) { 2737 if (CC(vmcs12->vm_function_control & 2738 ~vmx->nested.msrs.vmfunc_controls)) 2739 return -EINVAL; 2740 2741 if (nested_cpu_has_eptp_switching(vmcs12)) { 2742 if (CC(!nested_cpu_has_ept(vmcs12)) || 2743 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address))) 2744 return -EINVAL; 2745 } 2746 } 2747 2748 return 0; 2749 } 2750 2751 /* 2752 * Checks related to VM-Exit Control Fields 2753 */ 2754 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, 2755 struct vmcs12 *vmcs12) 2756 { 2757 struct vcpu_vmx *vmx = to_vmx(vcpu); 2758 2759 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls, 2760 vmx->nested.msrs.exit_ctls_low, 2761 vmx->nested.msrs.exit_ctls_high)) || 2762 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))) 2763 return -EINVAL; 2764 2765 return 0; 2766 } 2767 2768 /* 2769 * Checks related to VM-Entry Control Fields 2770 */ 2771 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, 2772 struct vmcs12 *vmcs12) 2773 { 2774 struct 
vcpu_vmx *vmx = to_vmx(vcpu); 2775 2776 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls, 2777 vmx->nested.msrs.entry_ctls_low, 2778 vmx->nested.msrs.entry_ctls_high))) 2779 return -EINVAL; 2780 2781 /* 2782 * From the Intel SDM, volume 3: 2783 * Fields relevant to VM-entry event injection must be set properly. 2784 * These fields are the VM-entry interruption-information field, the 2785 * VM-entry exception error code, and the VM-entry instruction length. 2786 */ 2787 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { 2788 u32 intr_info = vmcs12->vm_entry_intr_info_field; 2789 u8 vector = intr_info & INTR_INFO_VECTOR_MASK; 2790 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; 2791 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; 2792 bool should_have_error_code; 2793 bool urg = nested_cpu_has2(vmcs12, 2794 SECONDARY_EXEC_UNRESTRICTED_GUEST); 2795 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; 2796 2797 /* VM-entry interruption-info field: interruption type */ 2798 if (CC(intr_type == INTR_TYPE_RESERVED) || 2799 CC(intr_type == INTR_TYPE_OTHER_EVENT && 2800 !nested_cpu_supports_monitor_trap_flag(vcpu))) 2801 return -EINVAL; 2802 2803 /* VM-entry interruption-info field: vector */ 2804 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || 2805 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || 2806 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) 2807 return -EINVAL; 2808 2809 /* VM-entry interruption-info field: deliver error code */ 2810 should_have_error_code = 2811 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && 2812 x86_exception_has_error_code(vector); 2813 if (CC(has_error_code != should_have_error_code)) 2814 return -EINVAL; 2815 2816 /* VM-entry exception error code */ 2817 if (CC(has_error_code && 2818 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))) 2819 return -EINVAL; 2820 2821 /* VM-entry interruption-info field: reserved bits */ 2822 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK)) 2823 return -EINVAL; 2824 2825 /* VM-entry instruction length */ 2826 switch (intr_type) { 2827 case INTR_TYPE_SOFT_EXCEPTION: 2828 case INTR_TYPE_SOFT_INTR: 2829 case INTR_TYPE_PRIV_SW_EXCEPTION: 2830 if (CC(vmcs12->vm_entry_instruction_len > 15) || 2831 CC(vmcs12->vm_entry_instruction_len == 0 && 2832 CC(!nested_cpu_has_zero_length_injection(vcpu)))) 2833 return -EINVAL; 2834 } 2835 } 2836 2837 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) 2838 return -EINVAL; 2839 2840 return 0; 2841 } 2842 2843 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, 2844 struct vmcs12 *vmcs12) 2845 { 2846 if (nested_check_vm_execution_controls(vcpu, vmcs12) || 2847 nested_check_vm_exit_controls(vcpu, vmcs12) || 2848 nested_check_vm_entry_controls(vcpu, vmcs12)) 2849 return -EINVAL; 2850 2851 if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled) 2852 return nested_evmcs_check_controls(vmcs12); 2853 2854 return 0; 2855 } 2856 2857 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, 2858 struct vmcs12 *vmcs12) 2859 { 2860 bool ia32e; 2861 2862 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || 2863 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || 2864 CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3))) 2865 return -EINVAL; 2866 2867 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || 2868 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))) 2869 return -EINVAL; 2870 2871 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && 2872 
CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) 2873 return -EINVAL; 2874 2875 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && 2876 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 2877 vmcs12->host_ia32_perf_global_ctrl))) 2878 return -EINVAL; 2879 2880 #ifdef CONFIG_X86_64 2881 ia32e = !!(vcpu->arch.efer & EFER_LMA); 2882 #else 2883 ia32e = false; 2884 #endif 2885 2886 if (ia32e) { 2887 if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) || 2888 CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) 2889 return -EINVAL; 2890 } else { 2891 if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) || 2892 CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || 2893 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || 2894 CC((vmcs12->host_rip) >> 32)) 2895 return -EINVAL; 2896 } 2897 2898 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2899 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2900 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2901 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2902 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2903 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2904 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2905 CC(vmcs12->host_cs_selector == 0) || 2906 CC(vmcs12->host_tr_selector == 0) || 2907 CC(vmcs12->host_ss_selector == 0 && !ia32e)) 2908 return -EINVAL; 2909 2910 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) || 2911 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) || 2912 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) || 2913 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) || 2914 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) || 2915 CC(is_noncanonical_address(vmcs12->host_rip, vcpu))) 2916 return -EINVAL; 2917 2918 /* 2919 * If the load IA32_EFER VM-exit control is 1, bits reserved in the 2920 * IA32_EFER MSR must be 0 in the field for that register. In addition, 2921 * the values of the LMA and LME bits in the field must each be that of 2922 * the host address-space size VM-exit control. 
2923 */ 2924 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { 2925 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) || 2926 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) || 2927 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))) 2928 return -EINVAL; 2929 } 2930 2931 return 0; 2932 } 2933 2934 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, 2935 struct vmcs12 *vmcs12) 2936 { 2937 int r = 0; 2938 struct vmcs12 *shadow; 2939 struct kvm_host_map map; 2940 2941 if (vmcs12->vmcs_link_pointer == -1ull) 2942 return 0; 2943 2944 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) 2945 return -EINVAL; 2946 2947 if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))) 2948 return -EINVAL; 2949 2950 shadow = map.hva; 2951 2952 if (CC(shadow->hdr.revision_id != VMCS12_REVISION) || 2953 CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))) 2954 r = -EINVAL; 2955 2956 kvm_vcpu_unmap(vcpu, &map, false); 2957 return r; 2958 } 2959 2960 /* 2961 * Checks related to Guest Non-register State 2962 */ 2963 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) 2964 { 2965 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && 2966 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT && 2967 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI)) 2968 return -EINVAL; 2969 2970 return 0; 2971 } 2972 2973 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, 2974 struct vmcs12 *vmcs12, 2975 enum vm_entry_failure_code *entry_failure_code) 2976 { 2977 bool ia32e; 2978 2979 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2980 2981 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) || 2982 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))) 2983 return -EINVAL; 2984 2985 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && 2986 CC(!kvm_dr7_valid(vmcs12->guest_dr7))) 2987 return -EINVAL; 2988 2989 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && 2990 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) 2991 return -EINVAL; 2992 2993 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { 2994 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR; 2995 return -EINVAL; 2996 } 2997 2998 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 2999 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 3000 vmcs12->guest_ia32_perf_global_ctrl))) 3001 return -EINVAL; 3002 3003 /* 3004 * If the load IA32_EFER VM-entry control is 1, the following checks 3005 * are performed on the field for the IA32_EFER MSR: 3006 * - Bits reserved in the IA32_EFER MSR must be 0. 3007 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of 3008 * the IA-32e mode guest VM-exit control. It must also be identical 3009 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to 3010 * CR0.PG) is 1. 
3011 */ 3012 if (to_vmx(vcpu)->nested.nested_run_pending && 3013 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { 3014 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; 3015 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || 3016 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || 3017 CC(((vmcs12->guest_cr0 & X86_CR0_PG) && 3018 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))) 3019 return -EINVAL; 3020 } 3021 3022 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && 3023 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) || 3024 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))) 3025 return -EINVAL; 3026 3027 if (nested_check_guest_non_reg_state(vmcs12)) 3028 return -EINVAL; 3029 3030 return 0; 3031 } 3032 3033 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) 3034 { 3035 struct vcpu_vmx *vmx = to_vmx(vcpu); 3036 unsigned long cr3, cr4; 3037 bool vm_fail; 3038 3039 if (!nested_early_check) 3040 return 0; 3041 3042 if (vmx->msr_autoload.host.nr) 3043 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 3044 if (vmx->msr_autoload.guest.nr) 3045 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 3046 3047 preempt_disable(); 3048 3049 vmx_prepare_switch_to_guest(vcpu); 3050 3051 /* 3052 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, 3053 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to 3054 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. 3055 * there is no need to preserve other bits or save/restore the field. 3056 */ 3057 vmcs_writel(GUEST_RFLAGS, 0); 3058 3059 cr3 = __get_current_cr3_fast(); 3060 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 3061 vmcs_writel(HOST_CR3, cr3); 3062 vmx->loaded_vmcs->host_state.cr3 = cr3; 3063 } 3064 3065 cr4 = cr4_read_shadow(); 3066 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 3067 vmcs_writel(HOST_CR4, cr4); 3068 vmx->loaded_vmcs->host_state.cr4 = cr4; 3069 } 3070 3071 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 3072 vmx->loaded_vmcs->launched); 3073 3074 if (vmx->msr_autoload.host.nr) 3075 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 3076 if (vmx->msr_autoload.guest.nr) 3077 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 3078 3079 if (vm_fail) { 3080 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR); 3081 3082 preempt_enable(); 3083 3084 trace_kvm_nested_vmenter_failed( 3085 "early hardware check VM-instruction error: ", error); 3086 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3087 return 1; 3088 } 3089 3090 /* 3091 * VMExit clears RFLAGS.IF and DR7, even on a consistency check. 3092 */ 3093 if (hw_breakpoint_active()) 3094 set_debugreg(__this_cpu_read(cpu_dr7), 7); 3095 local_irq_enable(); 3096 preempt_enable(); 3097 3098 /* 3099 * A non-failing VMEntry means we somehow entered guest mode with 3100 * an illegal RIP, and that's just the tip of the iceberg. There 3101 * is no telling what memory has been modified or what state has 3102 * been exposed to unknown code. Hitting this all but guarantees 3103 * a (very critical) hardware issue. 3104 */ 3105 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & 3106 VMX_EXIT_REASONS_FAILED_VMENTRY)); 3107 3108 return 0; 3109 } 3110 3111 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu) 3112 { 3113 struct vcpu_vmx *vmx = to_vmx(vcpu); 3114 3115 /* 3116 * hv_evmcs may end up being not mapped after migration (when 3117 * L2 was running), map it here to make sure vmcs12 changes are 3118 * properly reflected. 
3119 */ 3120 if (vmx->nested.enlightened_vmcs_enabled && 3121 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) { 3122 enum nested_evmptrld_status evmptrld_status = 3123 nested_vmx_handle_enlightened_vmptrld(vcpu, false); 3124 3125 if (evmptrld_status == EVMPTRLD_VMFAIL || 3126 evmptrld_status == EVMPTRLD_ERROR) 3127 return false; 3128 3129 /* 3130 * Post migration VMCS12 always provides the most actual 3131 * information, copy it to eVMCS upon entry. 3132 */ 3133 vmx->nested.need_vmcs12_to_shadow_sync = true; 3134 } 3135 3136 return true; 3137 } 3138 3139 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) 3140 { 3141 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3142 struct vcpu_vmx *vmx = to_vmx(vcpu); 3143 struct kvm_host_map *map; 3144 struct page *page; 3145 u64 hpa; 3146 3147 if (!vcpu->arch.pdptrs_from_userspace && 3148 !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 3149 /* 3150 * Reload the guest's PDPTRs since after a migration 3151 * the guest CR3 might be restored prior to setting the nested 3152 * state which can lead to a load of wrong PDPTRs. 3153 */ 3154 if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))) 3155 return false; 3156 } 3157 3158 3159 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3160 /* 3161 * Translate L1 physical address to host physical 3162 * address for vmcs02. Keep the page pinned, so this 3163 * physical address remains valid. We keep a reference 3164 * to it so we can release it later. 3165 */ 3166 if (vmx->nested.apic_access_page) { /* shouldn't happen */ 3167 kvm_release_page_clean(vmx->nested.apic_access_page); 3168 vmx->nested.apic_access_page = NULL; 3169 } 3170 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); 3171 if (!is_error_page(page)) { 3172 vmx->nested.apic_access_page = page; 3173 hpa = page_to_phys(vmx->nested.apic_access_page); 3174 vmcs_write64(APIC_ACCESS_ADDR, hpa); 3175 } else { 3176 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n", 3177 __func__); 3178 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3179 vcpu->run->internal.suberror = 3180 KVM_INTERNAL_ERROR_EMULATION; 3181 vcpu->run->internal.ndata = 0; 3182 return false; 3183 } 3184 } 3185 3186 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3187 map = &vmx->nested.virtual_apic_map; 3188 3189 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { 3190 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); 3191 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) && 3192 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) && 3193 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3194 /* 3195 * The processor will never use the TPR shadow, simply 3196 * clear the bit from the execution control. Such a 3197 * configuration is useless, but it happens in tests. 3198 * For any other configuration, failing the vm entry is 3199 * _not_ what the processor does but it's basically the 3200 * only possibility we have. 3201 */ 3202 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); 3203 } else { 3204 /* 3205 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to 3206 * force VM-Entry to fail. 
3207 */ 3208 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); 3209 } 3210 } 3211 3212 if (nested_cpu_has_posted_intr(vmcs12)) { 3213 map = &vmx->nested.pi_desc_map; 3214 3215 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { 3216 vmx->nested.pi_desc = 3217 (struct pi_desc *)(((void *)map->hva) + 3218 offset_in_page(vmcs12->posted_intr_desc_addr)); 3219 vmcs_write64(POSTED_INTR_DESC_ADDR, 3220 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); 3221 } else { 3222 /* 3223 * Defer the KVM_INTERNAL_EXIT until KVM tries to 3224 * access the contents of the VMCS12 posted interrupt 3225 * descriptor. (Note that KVM may do this when it 3226 * should not, per the architectural specification.) 3227 */ 3228 vmx->nested.pi_desc = NULL; 3229 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR); 3230 } 3231 } 3232 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) 3233 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3234 else 3235 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3236 3237 return true; 3238 } 3239 3240 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) 3241 { 3242 if (!nested_get_evmcs_page(vcpu)) { 3243 pr_debug_ratelimited("%s: enlightened vmptrld failed\n", 3244 __func__); 3245 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3246 vcpu->run->internal.suberror = 3247 KVM_INTERNAL_ERROR_EMULATION; 3248 vcpu->run->internal.ndata = 0; 3249 3250 return false; 3251 } 3252 3253 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu)) 3254 return false; 3255 3256 return true; 3257 } 3258 3259 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) 3260 { 3261 struct vmcs12 *vmcs12; 3262 struct vcpu_vmx *vmx = to_vmx(vcpu); 3263 gpa_t dst; 3264 3265 if (WARN_ON_ONCE(!is_guest_mode(vcpu))) 3266 return 0; 3267 3268 if (WARN_ON_ONCE(vmx->nested.pml_full)) 3269 return 1; 3270 3271 /* 3272 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is 3273 * set is already checked as part of A/D emulation. 3274 */ 3275 vmcs12 = get_vmcs12(vcpu); 3276 if (!nested_cpu_has_pml(vmcs12)) 3277 return 0; 3278 3279 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { 3280 vmx->nested.pml_full = true; 3281 return 1; 3282 } 3283 3284 gpa &= ~0xFFFull; 3285 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; 3286 3287 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, 3288 offset_in_page(dst), sizeof(gpa))) 3289 return 0; 3290 3291 vmcs12->guest_pml_index--; 3292 3293 return 0; 3294 } 3295 3296 /* 3297 * Intel's VMX Instruction Reference specifies a common set of prerequisites 3298 * for running VMX instructions (except VMXON, whose prerequisites are 3299 * slightly different). It also specifies what exception to inject otherwise. 3300 * Note that many of these exceptions have priority over VM exits, so they 3301 * don't have to be checked again here. 
3302 */ 3303 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) 3304 { 3305 if (!to_vmx(vcpu)->nested.vmxon) { 3306 kvm_queue_exception(vcpu, UD_VECTOR); 3307 return 0; 3308 } 3309 3310 if (vmx_get_cpl(vcpu)) { 3311 kvm_inject_gp(vcpu, 0); 3312 return 0; 3313 } 3314 3315 return 1; 3316 } 3317 3318 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) 3319 { 3320 u8 rvi = vmx_get_rvi(); 3321 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI); 3322 3323 return ((rvi & 0xf0) > (vppr & 0xf0)); 3324 } 3325 3326 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 3327 struct vmcs12 *vmcs12); 3328 3329 /* 3330 * If from_vmentry is false, this is being called from state restore (either RSM 3331 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. 3332 * 3333 * Returns: 3334 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode 3335 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail 3336 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit 3337 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error 3338 */ 3339 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, 3340 bool from_vmentry) 3341 { 3342 struct vcpu_vmx *vmx = to_vmx(vcpu); 3343 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3344 enum vm_entry_failure_code entry_failure_code; 3345 bool evaluate_pending_interrupts; 3346 union vmx_exit_reason exit_reason = { 3347 .basic = EXIT_REASON_INVALID_STATE, 3348 .failed_vmentry = 1, 3349 }; 3350 u32 failed_index; 3351 3352 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 3353 kvm_vcpu_flush_tlb_current(vcpu); 3354 3355 evaluate_pending_interrupts = exec_controls_get(vmx) & 3356 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING); 3357 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) 3358 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); 3359 3360 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 3361 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 3362 if (kvm_mpx_supported() && 3363 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 3364 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3365 3366 /* 3367 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and* 3368 * nested early checks are disabled. In the event of a "late" VM-Fail, 3369 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its 3370 * software model to the pre-VMEntry host state. When EPT is disabled, 3371 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes 3372 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing 3373 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to 3374 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested 3375 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is 3376 * guaranteed to be overwritten with a shadow CR3 prior to re-entering 3377 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as 3378 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks 3379 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail 3380 * path would need to manually save/restore vmcs01.GUEST_CR3. 
3381	 */
3382	if (!enable_ept && !nested_early_check)
3383		vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3384
3385	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3386
3387	prepare_vmcs02_early(vmx, vmcs12);
3388
3389	if (from_vmentry) {
3390		if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
3391			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3392			return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
3393		}
3394
3395		if (nested_vmx_check_vmentry_hw(vcpu)) {
3396			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3397			return NVMX_VMENTRY_VMFAIL;
3398		}
3399
3400		if (nested_vmx_check_guest_state(vcpu, vmcs12,
3401						 &entry_failure_code)) {
3402			exit_reason.basic = EXIT_REASON_INVALID_STATE;
3403			vmcs12->exit_qualification = entry_failure_code;
3404			goto vmentry_fail_vmexit;
3405		}
3406	}
3407
3408	enter_guest_mode(vcpu);
3409
3410	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
3411		exit_reason.basic = EXIT_REASON_INVALID_STATE;
3412		vmcs12->exit_qualification = entry_failure_code;
3413		goto vmentry_fail_vmexit_guest_mode;
3414	}
3415
3416	if (from_vmentry) {
3417		failed_index = nested_vmx_load_msr(vcpu,
3418						   vmcs12->vm_entry_msr_load_addr,
3419						   vmcs12->vm_entry_msr_load_count);
3420		if (failed_index) {
3421			exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
3422			vmcs12->exit_qualification = failed_index;
3423			goto vmentry_fail_vmexit_guest_mode;
3424		}
3425	} else {
3426		/*
3427		 * The MMU is not initialized to point at the right entities yet and
3428		 * "get pages" would need to read data from the guest (i.e. we will
3429		 * need to perform gpa to hpa translation). Request a call
3430		 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3431		 * have already been set at vmentry time and should not be reset.
3432		 */
3433		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
3434	}
3435
3436	/*
3437	 * If L1 had a pending IRQ/NMI until it executed
3438	 * VMLAUNCH/VMRESUME which wasn't delivered because it was
3439	 * disallowed (e.g. interrupts disabled), L0 needs to
3440	 * evaluate if this pending event should cause an exit from L2
3441	 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
3442	 * intercept EXTERNAL_INTERRUPT).
3443	 *
3444	 * Usually this would be handled by the processor noticing an
3445	 * IRQ/NMI window request, or checking RVI during evaluation of
3446	 * pending virtual interrupts. However, this setting was done
3447	 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3448	 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3449	 */
3450	if (unlikely(evaluate_pending_interrupts))
3451		kvm_make_request(KVM_REQ_EVENT, vcpu);
3452
3453	/*
3454	 * Do not start the preemption timer hrtimer until after we know
3455	 * we are successful, so that only nested_vmx_vmexit needs to cancel
3456	 * the timer.
3457	 */
3458	vmx->nested.preemption_timer_expired = false;
3459	if (nested_cpu_has_preemption_timer(vmcs12)) {
3460		u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
3461		vmx_start_preemption_timer(vcpu, timer_value);
3462	}
3463
3464	/*
3465	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3466	 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3467	 * returned as far as L1 is concerned. It will only return (and set
3468	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3469	 */
3470	return NVMX_VMENTRY_SUCCESS;
3471
3472	/*
3473	 * A failed consistency check that leads to a VMExit during L1's
3474	 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3475	 * 26.7 "VM-entry failures during or after loading guest state".
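	 * In that case KVM reports the exit to L1 with the "failed VM-entry" bit
	 * set in the exit reason and loads vmcs12 host state, without updating
	 * the guest-state area of vmcs12 (see the vmentry_fail_vmexit label
	 * below).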
3476 */ 3477 vmentry_fail_vmexit_guest_mode: 3478 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 3479 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3480 leave_guest_mode(vcpu); 3481 3482 vmentry_fail_vmexit: 3483 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3484 3485 if (!from_vmentry) 3486 return NVMX_VMENTRY_VMEXIT; 3487 3488 load_vmcs12_host_state(vcpu, vmcs12); 3489 vmcs12->vm_exit_reason = exit_reason.full; 3490 if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 3491 vmx->nested.need_vmcs12_to_shadow_sync = true; 3492 return NVMX_VMENTRY_VMEXIT; 3493 } 3494 3495 /* 3496 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 3497 * for running an L2 nested guest. 3498 */ 3499 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) 3500 { 3501 struct vmcs12 *vmcs12; 3502 enum nvmx_vmentry_status status; 3503 struct vcpu_vmx *vmx = to_vmx(vcpu); 3504 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); 3505 enum nested_evmptrld_status evmptrld_status; 3506 3507 if (!nested_vmx_check_permission(vcpu)) 3508 return 1; 3509 3510 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch); 3511 if (evmptrld_status == EVMPTRLD_ERROR) { 3512 kvm_queue_exception(vcpu, UD_VECTOR); 3513 return 1; 3514 } else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) { 3515 return nested_vmx_failInvalid(vcpu); 3516 } 3517 3518 if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) && 3519 vmx->nested.current_vmptr == -1ull)) 3520 return nested_vmx_failInvalid(vcpu); 3521 3522 vmcs12 = get_vmcs12(vcpu); 3523 3524 /* 3525 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact 3526 * that there *is* a valid VMCS pointer, RFLAGS.CF is set 3527 * rather than RFLAGS.ZF, and no error number is stored to the 3528 * VM-instruction error field. 3529 */ 3530 if (CC(vmcs12->hdr.shadow_vmcs)) 3531 return nested_vmx_failInvalid(vcpu); 3532 3533 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { 3534 copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields); 3535 /* Enlightened VMCS doesn't have launch state */ 3536 vmcs12->launch_state = !launch; 3537 } else if (enable_shadow_vmcs) { 3538 copy_shadow_to_vmcs12(vmx); 3539 } 3540 3541 /* 3542 * The nested entry process starts with enforcing various prerequisites 3543 * on vmcs12 as required by the Intel SDM, and act appropriately when 3544 * they fail: As the SDM explains, some conditions should cause the 3545 * instruction to fail, while others will cause the instruction to seem 3546 * to succeed, but return an EXIT_REASON_INVALID_STATE. 3547 * To speed up the normal (success) code path, we should avoid checking 3548 * for misconfigurations which will anyway be caught by the processor 3549 * when using the merged vmcs02. 3550 */ 3551 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)) 3552 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); 3553 3554 if (CC(vmcs12->launch_state == launch)) 3555 return nested_vmx_fail(vcpu, 3556 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS 3557 : VMXERR_VMRESUME_NONLAUNCHED_VMCS); 3558 3559 if (nested_vmx_check_controls(vcpu, vmcs12)) 3560 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3561 3562 if (nested_vmx_check_host_state(vcpu, vmcs12)) 3563 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3564 3565 /* 3566 * We're finally done with prerequisite checking, and can start with 3567 * the nested entry. 
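	 * nested_run_pending is set for the duration of the emulated VM-entry;
	 * among other things it tells vmx_check_nested_events() to hold off
	 * synthesizing nested VM-exits until the entry to L2 has actually
	 * completed.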
3568 */ 3569 vmx->nested.nested_run_pending = 1; 3570 vmx->nested.has_preemption_timer_deadline = false; 3571 status = nested_vmx_enter_non_root_mode(vcpu, true); 3572 if (unlikely(status != NVMX_VMENTRY_SUCCESS)) 3573 goto vmentry_failed; 3574 3575 /* Emulate processing of posted interrupts on VM-Enter. */ 3576 if (nested_cpu_has_posted_intr(vmcs12) && 3577 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { 3578 vmx->nested.pi_pending = true; 3579 kvm_make_request(KVM_REQ_EVENT, vcpu); 3580 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); 3581 } 3582 3583 /* Hide L1D cache contents from the nested guest. */ 3584 vmx->vcpu.arch.l1tf_flush_l1d = true; 3585 3586 /* 3587 * Must happen outside of nested_vmx_enter_non_root_mode() as it will 3588 * also be used as part of restoring nVMX state for 3589 * snapshot restore (migration). 3590 * 3591 * In this flow, it is assumed that vmcs12 cache was 3592 * transferred as part of captured nVMX state and should 3593 * therefore not be read from guest memory (which may not 3594 * exist on destination host yet). 3595 */ 3596 nested_cache_shadow_vmcs12(vcpu, vmcs12); 3597 3598 switch (vmcs12->guest_activity_state) { 3599 case GUEST_ACTIVITY_HLT: 3600 /* 3601 * If we're entering a halted L2 vcpu and the L2 vcpu won't be 3602 * awakened by event injection or by an NMI-window VM-exit or 3603 * by an interrupt-window VM-exit, halt the vcpu. 3604 */ 3605 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && 3606 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) && 3607 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) && 3608 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { 3609 vmx->nested.nested_run_pending = 0; 3610 return kvm_vcpu_halt(vcpu); 3611 } 3612 break; 3613 case GUEST_ACTIVITY_WAIT_SIPI: 3614 vmx->nested.nested_run_pending = 0; 3615 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 3616 break; 3617 default: 3618 break; 3619 } 3620 3621 return 1; 3622 3623 vmentry_failed: 3624 vmx->nested.nested_run_pending = 0; 3625 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR) 3626 return 0; 3627 if (status == NVMX_VMENTRY_VMEXIT) 3628 return 1; 3629 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL); 3630 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3631 } 3632 3633 /* 3634 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date 3635 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). 3636 * This function returns the new value we should put in vmcs12.guest_cr0. 3637 * It's not enough to just return the vmcs02 GUEST_CR0. Rather, 3638 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now 3639 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 3640 * didn't trap the bit, because if L1 did, so would L0). 3641 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have 3642 * been modified by L2, and L1 knows it. So just leave the old value of 3643 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 3644 * isn't relevant, because if L0 traps this bit it can set it to anything. 3645 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have 3646 * changed these bits, and therefore they need to be updated, but L0 3647 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather 3648 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. 
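 * Putting the three cases together, this is what the helper below computes:
 *
 *   guest_cr0 = (vmcs02.GUEST_CR0       &  cr0_guest_owned_bits) |
 *               (vmcs12.guest_cr0       &  cr0_guest_host_mask)  |
 *               (vmcs02.CR0_READ_SHADOW & ~(cr0_guest_host_mask |
 *                                           cr0_guest_owned_bits))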
3649 */ 3650 static inline unsigned long 3651 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3652 { 3653 return 3654 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | 3655 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | 3656 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | 3657 vcpu->arch.cr0_guest_owned_bits)); 3658 } 3659 3660 static inline unsigned long 3661 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3662 { 3663 return 3664 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | 3665 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | 3666 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | 3667 vcpu->arch.cr4_guest_owned_bits)); 3668 } 3669 3670 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, 3671 struct vmcs12 *vmcs12) 3672 { 3673 u32 idt_vectoring; 3674 unsigned int nr; 3675 3676 if (vcpu->arch.exception.injected) { 3677 nr = vcpu->arch.exception.nr; 3678 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3679 3680 if (kvm_exception_is_soft(nr)) { 3681 vmcs12->vm_exit_instruction_len = 3682 vcpu->arch.event_exit_inst_len; 3683 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; 3684 } else 3685 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; 3686 3687 if (vcpu->arch.exception.has_error_code) { 3688 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; 3689 vmcs12->idt_vectoring_error_code = 3690 vcpu->arch.exception.error_code; 3691 } 3692 3693 vmcs12->idt_vectoring_info_field = idt_vectoring; 3694 } else if (vcpu->arch.nmi_injected) { 3695 vmcs12->idt_vectoring_info_field = 3696 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; 3697 } else if (vcpu->arch.interrupt.injected) { 3698 nr = vcpu->arch.interrupt.nr; 3699 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3700 3701 if (vcpu->arch.interrupt.soft) { 3702 idt_vectoring |= INTR_TYPE_SOFT_INTR; 3703 vmcs12->vm_entry_instruction_len = 3704 vcpu->arch.event_exit_inst_len; 3705 } else 3706 idt_vectoring |= INTR_TYPE_EXT_INTR; 3707 3708 vmcs12->idt_vectoring_info_field = idt_vectoring; 3709 } 3710 } 3711 3712 3713 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) 3714 { 3715 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3716 gfn_t gfn; 3717 3718 /* 3719 * Don't need to mark the APIC access page dirty; it is never 3720 * written to by the CPU during APIC virtualization. 
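	 * The virtual-APIC page and the posted-interrupt descriptor, by
	 * contrast, can be written by the CPU on L2's behalf, so they are
	 * marked dirty below.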
3721 */ 3722 3723 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3724 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; 3725 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3726 } 3727 3728 if (nested_cpu_has_posted_intr(vmcs12)) { 3729 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; 3730 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3731 } 3732 } 3733 3734 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 3735 { 3736 struct vcpu_vmx *vmx = to_vmx(vcpu); 3737 int max_irr; 3738 void *vapic_page; 3739 u16 status; 3740 3741 if (!vmx->nested.pi_pending) 3742 return 0; 3743 3744 if (!vmx->nested.pi_desc) 3745 goto mmio_needed; 3746 3747 vmx->nested.pi_pending = false; 3748 3749 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 3750 return 0; 3751 3752 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); 3753 if (max_irr != 256) { 3754 vapic_page = vmx->nested.virtual_apic_map.hva; 3755 if (!vapic_page) 3756 goto mmio_needed; 3757 3758 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, 3759 vapic_page, &max_irr); 3760 status = vmcs_read16(GUEST_INTR_STATUS); 3761 if ((u8)max_irr > ((u8)status & 0xff)) { 3762 status &= ~0xff; 3763 status |= (u8)max_irr; 3764 vmcs_write16(GUEST_INTR_STATUS, status); 3765 } 3766 } 3767 3768 nested_mark_vmcs12_pages_dirty(vcpu); 3769 return 0; 3770 3771 mmio_needed: 3772 kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL); 3773 return -ENXIO; 3774 } 3775 3776 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, 3777 unsigned long exit_qual) 3778 { 3779 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3780 unsigned int nr = vcpu->arch.exception.nr; 3781 u32 intr_info = nr | INTR_INFO_VALID_MASK; 3782 3783 if (vcpu->arch.exception.has_error_code) { 3784 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; 3785 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 3786 } 3787 3788 if (kvm_exception_is_soft(nr)) 3789 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 3790 else 3791 intr_info |= INTR_TYPE_HARD_EXCEPTION; 3792 3793 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && 3794 vmx_get_nmi_mask(vcpu)) 3795 intr_info |= INTR_INFO_UNBLOCK_NMI; 3796 3797 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); 3798 } 3799 3800 /* 3801 * Returns true if a debug trap is pending delivery. 3802 * 3803 * In KVM, debug traps bear an exception payload. As such, the class of a #DB 3804 * exception may be inferred from the presence of an exception payload. 3805 */ 3806 static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu) 3807 { 3808 return vcpu->arch.exception.pending && 3809 vcpu->arch.exception.nr == DB_VECTOR && 3810 vcpu->arch.exception.payload; 3811 } 3812 3813 /* 3814 * Certain VM-exits set the 'pending debug exceptions' field to indicate a 3815 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM 3816 * represents these debug traps with a payload that is said to be compatible 3817 * with the 'pending debug exceptions' field, write the payload to the VMCS 3818 * field if a VM-exit is delivered before the debug trap. 
3819 */ 3820 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu) 3821 { 3822 if (vmx_pending_dbg_trap(vcpu)) 3823 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 3824 vcpu->arch.exception.payload); 3825 } 3826 3827 static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu) 3828 { 3829 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && 3830 to_vmx(vcpu)->nested.preemption_timer_expired; 3831 } 3832 3833 static int vmx_check_nested_events(struct kvm_vcpu *vcpu) 3834 { 3835 struct vcpu_vmx *vmx = to_vmx(vcpu); 3836 unsigned long exit_qual; 3837 bool block_nested_events = 3838 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); 3839 bool mtf_pending = vmx->nested.mtf_pending; 3840 struct kvm_lapic *apic = vcpu->arch.apic; 3841 3842 /* 3843 * Clear the MTF state. If a higher priority VM-exit is delivered first, 3844 * this state is discarded. 3845 */ 3846 if (!block_nested_events) 3847 vmx->nested.mtf_pending = false; 3848 3849 if (lapic_in_kernel(vcpu) && 3850 test_bit(KVM_APIC_INIT, &apic->pending_events)) { 3851 if (block_nested_events) 3852 return -EBUSY; 3853 nested_vmx_update_pending_dbg(vcpu); 3854 clear_bit(KVM_APIC_INIT, &apic->pending_events); 3855 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED) 3856 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); 3857 return 0; 3858 } 3859 3860 if (lapic_in_kernel(vcpu) && 3861 test_bit(KVM_APIC_SIPI, &apic->pending_events)) { 3862 if (block_nested_events) 3863 return -EBUSY; 3864 3865 clear_bit(KVM_APIC_SIPI, &apic->pending_events); 3866 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 3867 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0, 3868 apic->sipi_vector & 0xFFUL); 3869 return 0; 3870 } 3871 3872 /* 3873 * Process any exceptions that are not debug traps before MTF. 3874 * 3875 * Note that only a pending nested run can block a pending exception. 3876 * Otherwise an injected NMI/interrupt should either be 3877 * lost or delivered to the nested hypervisor in the IDT_VECTORING_INFO, 3878 * while delivering the pending exception. 
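	 * (The ordering reflects that fault-like exceptions take priority over
	 * MTF, whereas trap-like #DBs do not; the latter are only considered
	 * after the MTF check below.)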
3879 */ 3880 3881 if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) { 3882 if (vmx->nested.nested_run_pending) 3883 return -EBUSY; 3884 if (!nested_vmx_check_exception(vcpu, &exit_qual)) 3885 goto no_vmexit; 3886 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3887 return 0; 3888 } 3889 3890 if (mtf_pending) { 3891 if (block_nested_events) 3892 return -EBUSY; 3893 nested_vmx_update_pending_dbg(vcpu); 3894 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); 3895 return 0; 3896 } 3897 3898 if (vcpu->arch.exception.pending) { 3899 if (vmx->nested.nested_run_pending) 3900 return -EBUSY; 3901 if (!nested_vmx_check_exception(vcpu, &exit_qual)) 3902 goto no_vmexit; 3903 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3904 return 0; 3905 } 3906 3907 if (nested_vmx_preemption_timer_pending(vcpu)) { 3908 if (block_nested_events) 3909 return -EBUSY; 3910 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); 3911 return 0; 3912 } 3913 3914 if (vcpu->arch.smi_pending && !is_smm(vcpu)) { 3915 if (block_nested_events) 3916 return -EBUSY; 3917 goto no_vmexit; 3918 } 3919 3920 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) { 3921 if (block_nested_events) 3922 return -EBUSY; 3923 if (!nested_exit_on_nmi(vcpu)) 3924 goto no_vmexit; 3925 3926 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 3927 NMI_VECTOR | INTR_TYPE_NMI_INTR | 3928 INTR_INFO_VALID_MASK, 0); 3929 /* 3930 * The NMI-triggered VM exit counts as injection: 3931 * clear this one and block further NMIs. 3932 */ 3933 vcpu->arch.nmi_pending = 0; 3934 vmx_set_nmi_mask(vcpu, true); 3935 return 0; 3936 } 3937 3938 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) { 3939 if (block_nested_events) 3940 return -EBUSY; 3941 if (!nested_exit_on_intr(vcpu)) 3942 goto no_vmexit; 3943 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); 3944 return 0; 3945 } 3946 3947 no_vmexit: 3948 return vmx_complete_nested_posted_interrupt(vcpu); 3949 } 3950 3951 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 3952 { 3953 ktime_t remaining = 3954 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); 3955 u64 value; 3956 3957 if (ktime_to_ns(remaining) <= 0) 3958 return 0; 3959 3960 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; 3961 do_div(value, 1000000); 3962 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 3963 } 3964 3965 static bool is_vmcs12_ext_field(unsigned long field) 3966 { 3967 switch (field) { 3968 case GUEST_ES_SELECTOR: 3969 case GUEST_CS_SELECTOR: 3970 case GUEST_SS_SELECTOR: 3971 case GUEST_DS_SELECTOR: 3972 case GUEST_FS_SELECTOR: 3973 case GUEST_GS_SELECTOR: 3974 case GUEST_LDTR_SELECTOR: 3975 case GUEST_TR_SELECTOR: 3976 case GUEST_ES_LIMIT: 3977 case GUEST_CS_LIMIT: 3978 case GUEST_SS_LIMIT: 3979 case GUEST_DS_LIMIT: 3980 case GUEST_FS_LIMIT: 3981 case GUEST_GS_LIMIT: 3982 case GUEST_LDTR_LIMIT: 3983 case GUEST_TR_LIMIT: 3984 case GUEST_GDTR_LIMIT: 3985 case GUEST_IDTR_LIMIT: 3986 case GUEST_ES_AR_BYTES: 3987 case GUEST_DS_AR_BYTES: 3988 case GUEST_FS_AR_BYTES: 3989 case GUEST_GS_AR_BYTES: 3990 case GUEST_LDTR_AR_BYTES: 3991 case GUEST_TR_AR_BYTES: 3992 case GUEST_ES_BASE: 3993 case GUEST_CS_BASE: 3994 case GUEST_SS_BASE: 3995 case GUEST_DS_BASE: 3996 case GUEST_FS_BASE: 3997 case GUEST_GS_BASE: 3998 case GUEST_LDTR_BASE: 3999 case GUEST_TR_BASE: 4000 case GUEST_GDTR_BASE: 4001 case GUEST_IDTR_BASE: 4002 case GUEST_PENDING_DBG_EXCEPTIONS: 4003 case GUEST_BNDCFGS: 4004 return true; 4005 default: 4006 break; 4007 } 4008 4009 return 
false; 4010 } 4011 4012 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 4013 struct vmcs12 *vmcs12) 4014 { 4015 struct vcpu_vmx *vmx = to_vmx(vcpu); 4016 4017 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); 4018 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); 4019 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); 4020 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); 4021 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); 4022 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); 4023 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); 4024 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); 4025 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); 4026 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); 4027 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); 4028 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); 4029 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); 4030 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); 4031 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); 4032 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); 4033 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); 4034 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); 4035 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); 4036 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); 4037 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); 4038 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); 4039 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); 4040 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); 4041 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); 4042 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); 4043 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); 4044 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); 4045 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); 4046 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); 4047 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); 4048 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); 4049 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); 4050 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); 4051 vmcs12->guest_pending_dbg_exceptions = 4052 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); 4053 if (kvm_mpx_supported()) 4054 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 4055 4056 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; 4057 } 4058 4059 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 4060 struct vmcs12 *vmcs12) 4061 { 4062 struct vcpu_vmx *vmx = to_vmx(vcpu); 4063 int cpu; 4064 4065 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) 4066 return; 4067 4068 4069 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); 4070 4071 cpu = get_cpu(); 4072 vmx->loaded_vmcs = &vmx->nested.vmcs02; 4073 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01); 4074 4075 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4076 4077 vmx->loaded_vmcs = &vmx->vmcs01; 4078 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); 4079 put_cpu(); 4080 } 4081 4082 /* 4083 * Update the guest state fields of vmcs12 to reflect changes that 4084 * occurred while L2 was running. (The "IA-32e mode guest" bit of the 4085 * VM-entry controls is also updated, since this is really a guest 4086 * state bit.) 
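 * Rarely-read guest fields (segment and descriptor-table state, BNDCFGS) are
 * split out into sync_vmcs02_to_vmcs12_rare(); unless an enlightened VMCS is
 * in use, copying them back is deferred until they are actually needed, e.g.
 * on a VMREAD from L1 or when the VMCS is released.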
4087 */ 4088 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 4089 { 4090 struct vcpu_vmx *vmx = to_vmx(vcpu); 4091 4092 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 4093 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4094 4095 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = 4096 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr); 4097 4098 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 4099 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); 4100 4101 vmcs12->guest_rsp = kvm_rsp_read(vcpu); 4102 vmcs12->guest_rip = kvm_rip_read(vcpu); 4103 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); 4104 4105 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); 4106 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); 4107 4108 vmcs12->guest_interruptibility_info = 4109 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 4110 4111 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 4112 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; 4113 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 4114 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI; 4115 else 4116 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; 4117 4118 if (nested_cpu_has_preemption_timer(vmcs12) && 4119 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER && 4120 !vmx->nested.nested_run_pending) 4121 vmcs12->vmx_preemption_timer_value = 4122 vmx_get_preemption_timer_value(vcpu); 4123 4124 /* 4125 * In some cases (usually, nested EPT), L2 is allowed to change its 4126 * own CR3 without exiting. If it has changed it, we must keep it. 4127 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined 4128 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. 4129 * 4130 * Additionally, restore L2's PDPTR to vmcs12. 4131 */ 4132 if (enable_ept) { 4133 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); 4134 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 4135 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); 4136 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); 4137 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); 4138 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); 4139 } 4140 } 4141 4142 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); 4143 4144 if (nested_cpu_has_vid(vmcs12)) 4145 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); 4146 4147 vmcs12->vm_entry_controls = 4148 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | 4149 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); 4150 4151 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) 4152 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); 4153 4154 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) 4155 vmcs12->guest_ia32_efer = vcpu->arch.efer; 4156 } 4157 4158 /* 4159 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits 4160 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), 4161 * and this function updates it to reflect the changes to the guest state while 4162 * L2 was running (and perhaps made some exits which were handled directly by L0 4163 * without going back to L1), and to reflect the exit reason. 4164 * Note that we do not have to copy here all VMCS fields, just those that 4165 * could have changed by the L2 guest or the exit - i.e., the guest-state and 4166 * exit-information fields only. Other fields are modified by L1 with VMWRITE, 4167 * which already writes to vmcs12 directly. 
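 * The exit-information fields are filled in unconditionally; launch_state,
 * the IDT-vectoring info and the exit MSR-store area are only updated when
 * the exit is not a failed VM-entry.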
4168	 */
4169	static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
4170				   u32 vm_exit_reason, u32 exit_intr_info,
4171				   unsigned long exit_qualification)
4172	{
4173		/* update exit information fields: */
4174		vmcs12->vm_exit_reason = vm_exit_reason;
4175		if (to_vmx(vcpu)->exit_reason.enclave_mode)
4176			vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
4177		vmcs12->exit_qualification = exit_qualification;
4178		vmcs12->vm_exit_intr_info = exit_intr_info;
4179
4180		vmcs12->idt_vectoring_info_field = 0;
4181		vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4182		vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4183
4184		if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
4185			vmcs12->launch_state = 1;
4186
4187			/* vm_entry_intr_info_field is cleared on exit. Emulate this
4188			 * instead of reading the real value. */
4189			vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
4190
4191			/*
4192			 * Transfer the event that L0 or L1 may have wanted to inject
4193			 * into L2 to IDT_VECTORING_INFO_FIELD.
4194			 */
4195			vmcs12_save_pending_event(vcpu, vmcs12);
4196
4197			/*
4198			 * According to spec, there's no need to store the guest's
4199			 * MSRs if the exit is due to a VM-entry failure that occurs
4200			 * during or after loading the guest state. Since this exit
4201			 * does not fall in that category, we need to save the MSRs.
4202			 */
4203			if (nested_vmx_store_msr(vcpu,
4204						 vmcs12->vm_exit_msr_store_addr,
4205						 vmcs12->vm_exit_msr_store_count))
4206				nested_vmx_abort(vcpu,
4207						 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
4208		}
4209
4210		/*
4211		 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
4212		 * preserved above and would only end up incorrectly in L1.
4213		 */
4214		vcpu->arch.nmi_injected = false;
4215		kvm_clear_exception_queue(vcpu);
4216		kvm_clear_interrupt_queue(vcpu);
4217	}
4218
4219	/*
4220	 * A part of what we need to do when the nested L2 guest exits and we want
4221	 * to run its L1 parent, is to reset L1's guest state to the host state
4222	 * specified in vmcs12.
4223	 * This function is to be called not only on normal nested exit, but also on
4224	 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4225	 * Failures During or After Loading Guest State").
4226	 * This function should be called when the active VMCS is L1's (vmcs01).
4227	 */
4228	static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
4229					   struct vmcs12 *vmcs12)
4230	{
4231		enum vm_entry_failure_code ignored;
4232		struct kvm_segment seg;
4233
4234		if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
4235			vcpu->arch.efer = vmcs12->host_ia32_efer;
4236		else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4237			vcpu->arch.efer |= (EFER_LMA | EFER_LME);
4238		else
4239			vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
4240		vmx_set_efer(vcpu, vcpu->arch.efer);
4241
4242		kvm_rsp_write(vcpu, vmcs12->host_rsp);
4243		kvm_rip_write(vcpu, vmcs12->host_rip);
4244		vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
4245		vmx_set_interrupt_shadow(vcpu, 0);
4246
4247		/*
4248		 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
4249		 * actually changed, because vmx_set_cr0 refers to efer set above.
4250		 *
4251		 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
4252		 * (KVM doesn't change it).
4253		 */
4254		vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
4255		vmx_set_cr0(vcpu, vmcs12->host_cr0);
4256
4257		/* Same as above - no reason to call set_cr4_guest_host_mask().
*/ 4258 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4259 vmx_set_cr4(vcpu, vmcs12->host_cr4); 4260 4261 nested_ept_uninit_mmu_context(vcpu); 4262 4263 /* 4264 * Only PDPTE load can fail as the value of cr3 was checked on entry and 4265 * couldn't have changed. 4266 */ 4267 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored)) 4268 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); 4269 4270 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false); 4271 4272 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); 4273 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); 4274 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); 4275 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); 4276 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); 4277 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); 4278 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); 4279 4280 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ 4281 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) 4282 vmcs_write64(GUEST_BNDCFGS, 0); 4283 4284 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { 4285 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); 4286 vcpu->arch.pat = vmcs12->host_ia32_pat; 4287 } 4288 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 4289 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 4290 vmcs12->host_ia32_perf_global_ctrl)); 4291 4292 /* Set L1 segment info according to Intel SDM 4293 27.5.2 Loading Host Segment and Descriptor-Table Registers */ 4294 seg = (struct kvm_segment) { 4295 .base = 0, 4296 .limit = 0xFFFFFFFF, 4297 .selector = vmcs12->host_cs_selector, 4298 .type = 11, 4299 .present = 1, 4300 .s = 1, 4301 .g = 1 4302 }; 4303 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4304 seg.l = 1; 4305 else 4306 seg.db = 1; 4307 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); 4308 seg = (struct kvm_segment) { 4309 .base = 0, 4310 .limit = 0xFFFFFFFF, 4311 .type = 3, 4312 .present = 1, 4313 .s = 1, 4314 .db = 1, 4315 .g = 1 4316 }; 4317 seg.selector = vmcs12->host_ds_selector; 4318 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); 4319 seg.selector = vmcs12->host_es_selector; 4320 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); 4321 seg.selector = vmcs12->host_ss_selector; 4322 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); 4323 seg.selector = vmcs12->host_fs_selector; 4324 seg.base = vmcs12->host_fs_base; 4325 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); 4326 seg.selector = vmcs12->host_gs_selector; 4327 seg.base = vmcs12->host_gs_base; 4328 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); 4329 seg = (struct kvm_segment) { 4330 .base = vmcs12->host_tr_base, 4331 .limit = 0x67, 4332 .selector = vmcs12->host_tr_selector, 4333 .type = 11, 4334 .present = 1 4335 }; 4336 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); 4337 4338 kvm_set_dr(vcpu, 7, 0x400); 4339 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4340 4341 if (cpu_has_vmx_msr_bitmap()) 4342 vmx_update_msr_bitmap(vcpu); 4343 4344 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 4345 vmcs12->vm_exit_msr_load_count)) 4346 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4347 } 4348 4349 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) 4350 { 4351 struct vmx_uret_msr *efer_msr; 4352 unsigned int i; 4353 4354 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) 4355 return vmcs_read64(GUEST_IA32_EFER); 4356 4357 if (cpu_has_load_ia32_efer()) 4358 return host_efer; 4359 4360 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { 4361 
if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) 4362 return vmx->msr_autoload.guest.val[i].value; 4363 } 4364 4365 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER); 4366 if (efer_msr) 4367 return efer_msr->data; 4368 4369 return host_efer; 4370 } 4371 4372 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) 4373 { 4374 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4375 struct vcpu_vmx *vmx = to_vmx(vcpu); 4376 struct vmx_msr_entry g, h; 4377 gpa_t gpa; 4378 u32 i, j; 4379 4380 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); 4381 4382 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 4383 /* 4384 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set 4385 * as vmcs01.GUEST_DR7 contains a userspace defined value 4386 * and vcpu->arch.dr7 is not squirreled away before the 4387 * nested VMENTER (not worth adding a variable in nested_vmx). 4388 */ 4389 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 4390 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 4391 else 4392 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); 4393 } 4394 4395 /* 4396 * Note that calling vmx_set_{efer,cr0,cr4} is important as they 4397 * handle a variety of side effects to KVM's software model. 4398 */ 4399 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); 4400 4401 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4402 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); 4403 4404 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4405 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); 4406 4407 nested_ept_uninit_mmu_context(vcpu); 4408 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 4409 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 4410 4411 /* 4412 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs 4413 * from vmcs01 (if necessary). The PDPTRs are not loaded on 4414 * VMFail, like everything else we just need to ensure our 4415 * software model is up-to-date. 4416 */ 4417 if (enable_ept && is_pae_paging(vcpu)) 4418 ept_save_pdptrs(vcpu); 4419 4420 kvm_mmu_reset_context(vcpu); 4421 4422 if (cpu_has_vmx_msr_bitmap()) 4423 vmx_update_msr_bitmap(vcpu); 4424 4425 /* 4426 * This nasty bit of open coding is a compromise between blindly 4427 * loading L1's MSRs using the exit load lists (incorrect emulation 4428 * of VMFail), leaving the nested VM's MSRs in the software model 4429 * (incorrect behavior) and snapshotting the modified MSRs (too 4430 * expensive since the lists are unbound by hardware). For each 4431 * MSR that was (prematurely) loaded from the nested VMEntry load 4432 * list, reload it from the exit load list if it exists and differs 4433 * from the guest value. The intent is to stuff host state as 4434 * silently as possible, not to fully process the exit load list. 
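	 * Note that an MSR which appears only in the VM-entry load list, with
	 * no matching entry in the VM-exit load list, is simply left with
	 * whatever value the nested VM-entry loaded.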
4435 */ 4436 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { 4437 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); 4438 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { 4439 pr_debug_ratelimited( 4440 "%s read MSR index failed (%u, 0x%08llx)\n", 4441 __func__, i, gpa); 4442 goto vmabort; 4443 } 4444 4445 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { 4446 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); 4447 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { 4448 pr_debug_ratelimited( 4449 "%s read MSR failed (%u, 0x%08llx)\n", 4450 __func__, j, gpa); 4451 goto vmabort; 4452 } 4453 if (h.index != g.index) 4454 continue; 4455 if (h.value == g.value) 4456 break; 4457 4458 if (nested_vmx_load_msr_check(vcpu, &h)) { 4459 pr_debug_ratelimited( 4460 "%s check failed (%u, 0x%x, 0x%x)\n", 4461 __func__, j, h.index, h.reserved); 4462 goto vmabort; 4463 } 4464 4465 if (kvm_set_msr(vcpu, h.index, h.value)) { 4466 pr_debug_ratelimited( 4467 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", 4468 __func__, j, h.index, h.value); 4469 goto vmabort; 4470 } 4471 } 4472 } 4473 4474 return; 4475 4476 vmabort: 4477 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4478 } 4479 4480 /* 4481 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 4482 * and modify vmcs12 to make it see what it would expect to see there if 4483 * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) 4484 */ 4485 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, 4486 u32 exit_intr_info, unsigned long exit_qualification) 4487 { 4488 struct vcpu_vmx *vmx = to_vmx(vcpu); 4489 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4490 4491 /* trying to cancel vmlaunch/vmresume is a bug */ 4492 WARN_ON_ONCE(vmx->nested.nested_run_pending); 4493 4494 /* Similarly, triple faults in L2 should never escape. */ 4495 WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)); 4496 4497 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { 4498 /* 4499 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map 4500 * Enlightened VMCS after migration and we still need to 4501 * do that when something is forcing L2->L1 exit prior to 4502 * the first L2 run. 4503 */ 4504 (void)nested_get_evmcs_page(vcpu); 4505 } 4506 4507 /* Service the TLB flush request for L2 before switching to L1. */ 4508 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 4509 kvm_vcpu_flush_tlb_current(vcpu); 4510 4511 /* 4512 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between 4513 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are 4514 * up-to-date before switching to L1. 4515 */ 4516 if (enable_ept && is_pae_paging(vcpu)) 4517 vmx_ept_load_pdptrs(vcpu); 4518 4519 leave_guest_mode(vcpu); 4520 4521 if (nested_cpu_has_preemption_timer(vmcs12)) 4522 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); 4523 4524 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) { 4525 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset; 4526 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING)) 4527 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; 4528 } 4529 4530 if (likely(!vmx->fail)) { 4531 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 4532 4533 if (vm_exit_reason != -1) 4534 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason, 4535 exit_intr_info, exit_qualification); 4536 4537 /* 4538 * Must happen outside of sync_vmcs02_to_vmcs12() as it will 4539 * also be used to capture vmcs12 cache as part of 4540 * capturing nVMX state for snapshot (migration). 
4541 * 4542 * Otherwise, this flush will dirty guest memory at a 4543 * point it is already assumed by user-space to be 4544 * immutable. 4545 */ 4546 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); 4547 } else { 4548 /* 4549 * The only expected VM-instruction error is "VM entry with 4550 * invalid control field(s)." Anything else indicates a 4551 * problem with L0. And we should never get here with a 4552 * VMFail of any type if early consistency checks are enabled. 4553 */ 4554 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 4555 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4556 WARN_ON_ONCE(nested_early_check); 4557 } 4558 4559 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 4560 4561 /* Update any VMCS fields that might have changed while L2 ran */ 4562 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 4563 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 4564 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 4565 if (kvm_has_tsc_control) 4566 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); 4567 4568 if (vmx->nested.l1_tpr_threshold != -1) 4569 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); 4570 4571 if (vmx->nested.change_vmcs01_virtual_apic_mode) { 4572 vmx->nested.change_vmcs01_virtual_apic_mode = false; 4573 vmx_set_virtual_apic_mode(vcpu); 4574 } 4575 4576 if (vmx->nested.update_vmcs01_cpu_dirty_logging) { 4577 vmx->nested.update_vmcs01_cpu_dirty_logging = false; 4578 vmx_update_cpu_dirty_logging(vcpu); 4579 } 4580 4581 /* Unpin physical memory we referred to in vmcs02 */ 4582 if (vmx->nested.apic_access_page) { 4583 kvm_release_page_clean(vmx->nested.apic_access_page); 4584 vmx->nested.apic_access_page = NULL; 4585 } 4586 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); 4587 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); 4588 vmx->nested.pi_desc = NULL; 4589 4590 if (vmx->nested.reload_vmcs01_apic_access_page) { 4591 vmx->nested.reload_vmcs01_apic_access_page = false; 4592 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 4593 } 4594 4595 if ((vm_exit_reason != -1) && 4596 (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))) 4597 vmx->nested.need_vmcs12_to_shadow_sync = true; 4598 4599 /* in case we halted in L2 */ 4600 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 4601 4602 if (likely(!vmx->fail)) { 4603 if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && 4604 nested_exit_intr_ack_set(vcpu)) { 4605 int irq = kvm_cpu_get_interrupt(vcpu); 4606 WARN_ON(irq < 0); 4607 vmcs12->vm_exit_intr_info = irq | 4608 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; 4609 } 4610 4611 if (vm_exit_reason != -1) 4612 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, 4613 vmcs12->exit_qualification, 4614 vmcs12->idt_vectoring_info_field, 4615 vmcs12->vm_exit_intr_info, 4616 vmcs12->vm_exit_intr_error_code, 4617 KVM_ISA_VMX); 4618 4619 load_vmcs12_host_state(vcpu, vmcs12); 4620 4621 return; 4622 } 4623 4624 /* 4625 * After an early L2 VM-entry failure, we're now back 4626 * in L1 which thinks it just finished a VMLAUNCH or 4627 * VMRESUME instruction, so we need to set the failure 4628 * flag and the VM-instruction error field of the VMCS 4629 * accordingly, and skip the emulated instruction. 4630 */ 4631 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4632 4633 /* 4634 * Restore L1's host state to KVM's software model. 
We're here 4635 * because a consistency check was caught by hardware, which 4636 * means some amount of guest state has been propagated to KVM's 4637 * model and needs to be unwound to the host's state. 4638 */ 4639 nested_vmx_restore_host_state(vcpu); 4640 4641 vmx->fail = 0; 4642 } 4643 4644 static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu) 4645 { 4646 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); 4647 } 4648 4649 /* 4650 * Decode the memory-address operand of a vmx instruction, as recorded on an 4651 * exit caused by such an instruction (run by a guest hypervisor). 4652 * On success, returns 0. When the operand is invalid, returns 1 and throws 4653 * #UD, #GP, or #SS. 4654 */ 4655 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, 4656 u32 vmx_instruction_info, bool wr, int len, gva_t *ret) 4657 { 4658 gva_t off; 4659 bool exn; 4660 struct kvm_segment s; 4661 4662 /* 4663 * According to Vol. 3B, "Information for VM Exits Due to Instruction 4664 * Execution", on an exit, vmx_instruction_info holds most of the 4665 * addressing components of the operand. Only the displacement part 4666 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). 4667 * For how an actual address is calculated from all these components, 4668 * refer to Vol. 1, "Operand Addressing". 4669 */ 4670 int scaling = vmx_instruction_info & 3; 4671 int addr_size = (vmx_instruction_info >> 7) & 7; 4672 bool is_reg = vmx_instruction_info & (1u << 10); 4673 int seg_reg = (vmx_instruction_info >> 15) & 7; 4674 int index_reg = (vmx_instruction_info >> 18) & 0xf; 4675 bool index_is_valid = !(vmx_instruction_info & (1u << 22)); 4676 int base_reg = (vmx_instruction_info >> 23) & 0xf; 4677 bool base_is_valid = !(vmx_instruction_info & (1u << 27)); 4678 4679 if (is_reg) { 4680 kvm_queue_exception(vcpu, UD_VECTOR); 4681 return 1; 4682 } 4683 4684 /* Addr = segment_base + offset */ 4685 /* offset = base + [index * scale] + displacement */ 4686 off = exit_qualification; /* holds the displacement */ 4687 if (addr_size == 1) 4688 off = (gva_t)sign_extend64(off, 31); 4689 else if (addr_size == 0) 4690 off = (gva_t)sign_extend64(off, 15); 4691 if (base_is_valid) 4692 off += kvm_register_read(vcpu, base_reg); 4693 if (index_is_valid) 4694 off += kvm_register_read(vcpu, index_reg) << scaling; 4695 vmx_get_segment(vcpu, &s, seg_reg); 4696 4697 /* 4698 * The effective address, i.e. @off, of a memory operand is truncated 4699 * based on the address size of the instruction. Note that this is 4700 * the *effective address*, i.e. the address prior to accounting for 4701 * the segment's base. 4702 */ 4703 if (addr_size == 1) /* 32 bit */ 4704 off &= 0xffffffff; 4705 else if (addr_size == 0) /* 16 bit */ 4706 off &= 0xffff; 4707 4708 /* Checks for #GP/#SS exceptions. */ 4709 exn = false; 4710 if (is_long_mode(vcpu)) { 4711 /* 4712 * The virtual/linear address is never truncated in 64-bit 4713 * mode, e.g. a 32-bit address size can yield a 64-bit virtual 4714 * address when using FS/GS with a non-zero base. 4715 */ 4716 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS) 4717 *ret = s.base + off; 4718 else 4719 *ret = off; 4720 4721 /* Long mode: #GP(0)/#SS(0) if the memory address is in a 4722 * non-canonical form. This is the only check on the memory 4723 * destination for long mode! 
4724 */ 4725 exn = is_noncanonical_address(*ret, vcpu); 4726 } else { 4727 /* 4728 * When not in long mode, the virtual/linear address is 4729 * unconditionally truncated to 32 bits regardless of the 4730 * address size. 4731 */ 4732 *ret = (s.base + off) & 0xffffffff; 4733 4734 /* Protected mode: apply checks for segment validity in the 4735 * following order: 4736 * - segment type check (#GP(0) may be thrown) 4737 * - usability check (#GP(0)/#SS(0)) 4738 * - limit check (#GP(0)/#SS(0)) 4739 */ 4740 if (wr) 4741 /* #GP(0) if the destination operand is located in a 4742 * read-only data segment or any code segment. 4743 */ 4744 exn = ((s.type & 0xa) == 0 || (s.type & 8)); 4745 else 4746 /* #GP(0) if the source operand is located in an 4747 * execute-only code segment 4748 */ 4749 exn = ((s.type & 0xa) == 8); 4750 if (exn) { 4751 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 4752 return 1; 4753 } 4754 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 4755 */ 4756 exn = (s.unusable != 0); 4757 4758 /* 4759 * Protected mode: #GP(0)/#SS(0) if the memory operand is 4760 * outside the segment limit. All CPUs that support VMX ignore 4761 * limit checks for flat segments, i.e. segments with base==0, 4762 * limit==0xffffffff and of type expand-up data or code. 4763 */ 4764 if (!(s.base == 0 && s.limit == 0xffffffff && 4765 ((s.type & 8) || !(s.type & 4)))) 4766 exn = exn || ((u64)off + len - 1 > s.limit); 4767 } 4768 if (exn) { 4769 kvm_queue_exception_e(vcpu, 4770 seg_reg == VCPU_SREG_SS ? 4771 SS_VECTOR : GP_VECTOR, 4772 0); 4773 return 1; 4774 } 4775 4776 return 0; 4777 } 4778 4779 void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu) 4780 { 4781 struct vcpu_vmx *vmx; 4782 4783 if (!nested_vmx_allowed(vcpu)) 4784 return; 4785 4786 vmx = to_vmx(vcpu); 4787 if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) { 4788 vmx->nested.msrs.entry_ctls_high |= 4789 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 4790 vmx->nested.msrs.exit_ctls_high |= 4791 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 4792 } else { 4793 vmx->nested.msrs.entry_ctls_high &= 4794 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 4795 vmx->nested.msrs.exit_ctls_high &= 4796 ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 4797 } 4798 } 4799 4800 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer, 4801 int *ret) 4802 { 4803 gva_t gva; 4804 struct x86_exception e; 4805 int r; 4806 4807 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 4808 vmcs_read32(VMX_INSTRUCTION_INFO), false, 4809 sizeof(*vmpointer), &gva)) { 4810 *ret = 1; 4811 return -EINVAL; 4812 } 4813 4814 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e); 4815 if (r != X86EMUL_CONTINUE) { 4816 *ret = kvm_handle_memory_failure(vcpu, r, &e); 4817 return -EINVAL; 4818 } 4819 4820 return 0; 4821 } 4822 4823 /* 4824 * Allocate a shadow VMCS and associate it with the currently loaded 4825 * VMCS, unless such a shadow VMCS already exists. The newly allocated 4826 * VMCS is also VMCLEARed, so that it is ready for use. 4827 */ 4828 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) 4829 { 4830 struct vcpu_vmx *vmx = to_vmx(vcpu); 4831 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; 4832 4833 /* 4834 * We should allocate a shadow vmcs for vmcs01 only when L1 4835 * executes VMXON and free it when L1 executes VMXOFF. 4836 * As it is invalid to execute VMXON twice, we shouldn't reach 4837 * here when vmcs01 already have an allocated shadow vmcs. 
4838 */ 4839 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); 4840 4841 if (!loaded_vmcs->shadow_vmcs) { 4842 loaded_vmcs->shadow_vmcs = alloc_vmcs(true); 4843 if (loaded_vmcs->shadow_vmcs) 4844 vmcs_clear(loaded_vmcs->shadow_vmcs); 4845 } 4846 return loaded_vmcs->shadow_vmcs; 4847 } 4848 4849 static int enter_vmx_operation(struct kvm_vcpu *vcpu) 4850 { 4851 struct vcpu_vmx *vmx = to_vmx(vcpu); 4852 int r; 4853 4854 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); 4855 if (r < 0) 4856 goto out_vmcs02; 4857 4858 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4859 if (!vmx->nested.cached_vmcs12) 4860 goto out_cached_vmcs12; 4861 4862 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4863 if (!vmx->nested.cached_shadow_vmcs12) 4864 goto out_cached_shadow_vmcs12; 4865 4866 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) 4867 goto out_shadow_vmcs; 4868 4869 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, 4870 HRTIMER_MODE_ABS_PINNED); 4871 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 4872 4873 vmx->nested.vpid02 = allocate_vpid(); 4874 4875 vmx->nested.vmcs02_initialized = false; 4876 vmx->nested.vmxon = true; 4877 4878 if (vmx_pt_mode_is_host_guest()) { 4879 vmx->pt_desc.guest.ctl = 0; 4880 pt_update_intercept_for_msr(vcpu); 4881 } 4882 4883 return 0; 4884 4885 out_shadow_vmcs: 4886 kfree(vmx->nested.cached_shadow_vmcs12); 4887 4888 out_cached_shadow_vmcs12: 4889 kfree(vmx->nested.cached_vmcs12); 4890 4891 out_cached_vmcs12: 4892 free_loaded_vmcs(&vmx->nested.vmcs02); 4893 4894 out_vmcs02: 4895 return -ENOMEM; 4896 } 4897 4898 /* 4899 * Emulate the VMXON instruction. 4900 * Currently, we just remember that VMX is active, and do not save or even 4901 * inspect the argument to VMXON (the so-called "VMXON pointer") because we 4902 * do not currently need to store anything in that guest-allocated memory 4903 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their 4904 * argument is different from the VMXON pointer (which the spec says they do). 4905 */ 4906 static int handle_vmon(struct kvm_vcpu *vcpu) 4907 { 4908 int ret; 4909 gpa_t vmptr; 4910 uint32_t revision; 4911 struct vcpu_vmx *vmx = to_vmx(vcpu); 4912 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED 4913 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; 4914 4915 /* 4916 * The Intel VMX Instruction Reference lists a bunch of bits that are 4917 * prerequisite to running VMXON, most notably cr4.VMXE must be set to 4918 * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this). 4919 * Otherwise, we should fail with #UD. But most faulting conditions 4920 * have already been checked by hardware, prior to the VM-exit for 4921 * VMXON. We do test guest cr4.VMXE because processor CR4 always has 4922 * that bit set to 1 in non-root mode. 4923 */ 4924 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { 4925 kvm_queue_exception(vcpu, UD_VECTOR); 4926 return 1; 4927 } 4928 4929 /* CPL=0 must be checked manually. 
*/ 4930 if (vmx_get_cpl(vcpu)) { 4931 kvm_inject_gp(vcpu, 0); 4932 return 1; 4933 } 4934 4935 if (vmx->nested.vmxon) 4936 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); 4937 4938 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) 4939 != VMXON_NEEDED_FEATURES) { 4940 kvm_inject_gp(vcpu, 0); 4941 return 1; 4942 } 4943 4944 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret)) 4945 return ret; 4946 4947 /* 4948 * SDM 3: 24.11.5 4949 * The first 4 bytes of VMXON region contain the supported 4950 * VMCS revision identifier 4951 * 4952 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; 4953 * which replaces physical address width with 32 4954 */ 4955 if (!page_address_valid(vcpu, vmptr)) 4956 return nested_vmx_failInvalid(vcpu); 4957 4958 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) || 4959 revision != VMCS12_REVISION) 4960 return nested_vmx_failInvalid(vcpu); 4961 4962 vmx->nested.vmxon_ptr = vmptr; 4963 ret = enter_vmx_operation(vcpu); 4964 if (ret) 4965 return ret; 4966 4967 return nested_vmx_succeed(vcpu); 4968 } 4969 4970 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) 4971 { 4972 struct vcpu_vmx *vmx = to_vmx(vcpu); 4973 4974 if (vmx->nested.current_vmptr == -1ull) 4975 return; 4976 4977 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 4978 4979 if (enable_shadow_vmcs) { 4980 /* copy to memory all shadowed fields in case 4981 they were modified */ 4982 copy_shadow_to_vmcs12(vmx); 4983 vmx_disable_shadow_vmcs(vmx); 4984 } 4985 vmx->nested.posted_intr_nv = -1; 4986 4987 /* Flush VMCS12 to guest memory */ 4988 kvm_vcpu_write_guest_page(vcpu, 4989 vmx->nested.current_vmptr >> PAGE_SHIFT, 4990 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); 4991 4992 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 4993 4994 vmx->nested.current_vmptr = -1ull; 4995 } 4996 4997 /* Emulate the VMXOFF instruction */ 4998 static int handle_vmoff(struct kvm_vcpu *vcpu) 4999 { 5000 if (!nested_vmx_check_permission(vcpu)) 5001 return 1; 5002 5003 free_nested(vcpu); 5004 5005 /* Process a latched INIT during time CPU was in VMX operation */ 5006 kvm_make_request(KVM_REQ_EVENT, vcpu); 5007 5008 return nested_vmx_succeed(vcpu); 5009 } 5010 5011 /* Emulate the VMCLEAR instruction */ 5012 static int handle_vmclear(struct kvm_vcpu *vcpu) 5013 { 5014 struct vcpu_vmx *vmx = to_vmx(vcpu); 5015 u32 zero = 0; 5016 gpa_t vmptr; 5017 u64 evmcs_gpa; 5018 int r; 5019 5020 if (!nested_vmx_check_permission(vcpu)) 5021 return 1; 5022 5023 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5024 return r; 5025 5026 if (!page_address_valid(vcpu, vmptr)) 5027 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); 5028 5029 if (vmptr == vmx->nested.vmxon_ptr) 5030 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); 5031 5032 /* 5033 * When Enlightened VMEntry is enabled on the calling CPU we treat 5034 * memory area pointer by vmptr as Enlightened VMCS (as there's no good 5035 * way to distinguish it from VMCS12) and we must not corrupt it by 5036 * writing to the non-existent 'launch_state' field. The area doesn't 5037 * have to be the currently active EVMCS on the calling CPU and there's 5038 * nothing KVM has to do to transition it from 'active' to 'non-active' 5039 * state. It is possible that the area will stay mapped as 5040 * vmx->nested.hv_evmcs but this shouldn't be a problem. 
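	 * In other words, when Enlightened VMEntry is in use, VMCLEAR of
	 * anything other than the currently active eVMCS is emulated as a
	 * successful nop, while VMCLEAR of the active eVMCS releases it.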
5041 */ 5042 if (likely(!vmx->nested.enlightened_vmcs_enabled || 5043 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) { 5044 if (vmptr == vmx->nested.current_vmptr) 5045 nested_release_vmcs12(vcpu); 5046 5047 kvm_vcpu_write_guest(vcpu, 5048 vmptr + offsetof(struct vmcs12, 5049 launch_state), 5050 &zero, sizeof(zero)); 5051 } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) { 5052 nested_release_evmcs(vcpu); 5053 } 5054 5055 return nested_vmx_succeed(vcpu); 5056 } 5057 5058 /* Emulate the VMLAUNCH instruction */ 5059 static int handle_vmlaunch(struct kvm_vcpu *vcpu) 5060 { 5061 return nested_vmx_run(vcpu, true); 5062 } 5063 5064 /* Emulate the VMRESUME instruction */ 5065 static int handle_vmresume(struct kvm_vcpu *vcpu) 5066 { 5067 5068 return nested_vmx_run(vcpu, false); 5069 } 5070 5071 static int handle_vmread(struct kvm_vcpu *vcpu) 5072 { 5073 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 5074 : get_vmcs12(vcpu); 5075 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5076 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5077 struct vcpu_vmx *vmx = to_vmx(vcpu); 5078 struct x86_exception e; 5079 unsigned long field; 5080 u64 value; 5081 gva_t gva = 0; 5082 short offset; 5083 int len, r; 5084 5085 if (!nested_vmx_check_permission(vcpu)) 5086 return 1; 5087 5088 /* 5089 * In VMX non-root operation, when the VMCS-link pointer is -1ull, 5090 * any VMREAD sets the ALU flags for VMfailInvalid. 5091 */ 5092 if (vmx->nested.current_vmptr == -1ull || 5093 (is_guest_mode(vcpu) && 5094 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) 5095 return nested_vmx_failInvalid(vcpu); 5096 5097 /* Decode instruction info and find the field to read */ 5098 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); 5099 5100 offset = vmcs_field_to_offset(field); 5101 if (offset < 0) 5102 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5103 5104 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field)) 5105 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5106 5107 /* Read the field, zero-extended to a u64 value */ 5108 value = vmcs12_read_any(vmcs12, field, offset); 5109 5110 /* 5111 * Now copy part of this value to register or memory, as requested. 5112 * Note that the number of bits actually copied is 32 or 64 depending 5113 * on the guest's mode (32 or 64 bit), not on the given field's length. 5114 */ 5115 if (instr_info & BIT(10)) { 5116 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value); 5117 } else { 5118 len = is_64_bit_mode(vcpu) ? 8 : 4; 5119 if (get_vmx_mem_address(vcpu, exit_qualification, 5120 instr_info, true, len, &gva)) 5121 return 1; 5122 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ 5123 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e); 5124 if (r != X86EMUL_CONTINUE) 5125 return kvm_handle_memory_failure(vcpu, r, &e); 5126 } 5127 5128 return nested_vmx_succeed(vcpu); 5129 } 5130 5131 static bool is_shadow_field_rw(unsigned long field) 5132 { 5133 switch (field) { 5134 #define SHADOW_FIELD_RW(x, y) case x: 5135 #include "vmcs_shadow_fields.h" 5136 return true; 5137 default: 5138 break; 5139 } 5140 return false; 5141 } 5142 5143 static bool is_shadow_field_ro(unsigned long field) 5144 { 5145 switch (field) { 5146 #define SHADOW_FIELD_RO(x, y) case x: 5147 #include "vmcs_shadow_fields.h" 5148 return true; 5149 default: 5150 break; 5151 } 5152 return false; 5153 } 5154 5155 static int handle_vmwrite(struct kvm_vcpu *vcpu) 5156 { 5157 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? 
get_shadow_vmcs12(vcpu) 5158 : get_vmcs12(vcpu); 5159 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5160 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5161 struct vcpu_vmx *vmx = to_vmx(vcpu); 5162 struct x86_exception e; 5163 unsigned long field; 5164 short offset; 5165 gva_t gva; 5166 int len, r; 5167 5168 /* 5169 * The value to write might be 32 or 64 bits, depending on L1's long 5170 * mode, and eventually we need to write that into a field of several 5171 * possible lengths. The code below first zero-extends the value to 64 5172 * bit (value), and then copies only the appropriate number of 5173 * bits into the vmcs12 field. 5174 */ 5175 u64 value = 0; 5176 5177 if (!nested_vmx_check_permission(vcpu)) 5178 return 1; 5179 5180 /* 5181 * In VMX non-root operation, when the VMCS-link pointer is -1ull, 5182 * any VMWRITE sets the ALU flags for VMfailInvalid. 5183 */ 5184 if (vmx->nested.current_vmptr == -1ull || 5185 (is_guest_mode(vcpu) && 5186 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) 5187 return nested_vmx_failInvalid(vcpu); 5188 5189 if (instr_info & BIT(10)) 5190 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf)); 5191 else { 5192 len = is_64_bit_mode(vcpu) ? 8 : 4; 5193 if (get_vmx_mem_address(vcpu, exit_qualification, 5194 instr_info, false, len, &gva)) 5195 return 1; 5196 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e); 5197 if (r != X86EMUL_CONTINUE) 5198 return kvm_handle_memory_failure(vcpu, r, &e); 5199 } 5200 5201 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); 5202 5203 offset = vmcs_field_to_offset(field); 5204 if (offset < 0) 5205 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5206 5207 /* 5208 * If the vCPU supports "VMWRITE to any supported field in the 5209 * VMCS," then the "read-only" fields are actually read/write. 5210 */ 5211 if (vmcs_field_readonly(field) && 5212 !nested_cpu_has_vmwrite_any_field(vcpu)) 5213 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); 5214 5215 /* 5216 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties 5217 * vmcs12, else we may crush a field or consume a stale value. 5218 */ 5219 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) 5220 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5221 5222 /* 5223 * Some Intel CPUs intentionally drop the reserved bits of the AR byte 5224 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM 5225 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE 5226 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD 5227 * from L1 will return a different value than VMREAD from L2 (L1 sees 5228 * the stripped down value, L2 sees the full value as stored by KVM). 5229 */ 5230 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES) 5231 value &= 0x1f0ff; 5232 5233 vmcs12_write_any(vmcs12, field, offset, value); 5234 5235 /* 5236 * Do not track vmcs12 dirty-state if in guest-mode as we actually 5237 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated 5238 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't 5239 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path. 5240 */ 5241 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) { 5242 /* 5243 * L1 can read these fields without exiting, ensure the 5244 * shadow VMCS is up-to-date. 
5245 */ 5246 if (enable_shadow_vmcs && is_shadow_field_ro(field)) { 5247 preempt_disable(); 5248 vmcs_load(vmx->vmcs01.shadow_vmcs); 5249 5250 __vmcs_writel(field, value); 5251 5252 vmcs_clear(vmx->vmcs01.shadow_vmcs); 5253 vmcs_load(vmx->loaded_vmcs->vmcs); 5254 preempt_enable(); 5255 } 5256 vmx->nested.dirty_vmcs12 = true; 5257 } 5258 5259 return nested_vmx_succeed(vcpu); 5260 } 5261 5262 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) 5263 { 5264 vmx->nested.current_vmptr = vmptr; 5265 if (enable_shadow_vmcs) { 5266 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 5267 vmcs_write64(VMCS_LINK_POINTER, 5268 __pa(vmx->vmcs01.shadow_vmcs)); 5269 vmx->nested.need_vmcs12_to_shadow_sync = true; 5270 } 5271 vmx->nested.dirty_vmcs12 = true; 5272 } 5273 5274 /* Emulate the VMPTRLD instruction */ 5275 static int handle_vmptrld(struct kvm_vcpu *vcpu) 5276 { 5277 struct vcpu_vmx *vmx = to_vmx(vcpu); 5278 gpa_t vmptr; 5279 int r; 5280 5281 if (!nested_vmx_check_permission(vcpu)) 5282 return 1; 5283 5284 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5285 return r; 5286 5287 if (!page_address_valid(vcpu, vmptr)) 5288 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); 5289 5290 if (vmptr == vmx->nested.vmxon_ptr) 5291 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); 5292 5293 /* Forbid normal VMPTRLD if Enlightened version was used */ 5294 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 5295 return 1; 5296 5297 if (vmx->nested.current_vmptr != vmptr) { 5298 struct kvm_host_map map; 5299 struct vmcs12 *new_vmcs12; 5300 5301 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) { 5302 /* 5303 * Reads from an unbacked page return all 1s, 5304 * which means that the 32 bits located at the 5305 * given physical address won't match the required 5306 * VMCS12_REVISION identifier. 5307 */ 5308 return nested_vmx_fail(vcpu, 5309 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5310 } 5311 5312 new_vmcs12 = map.hva; 5313 5314 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || 5315 (new_vmcs12->hdr.shadow_vmcs && 5316 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { 5317 kvm_vcpu_unmap(vcpu, &map, false); 5318 return nested_vmx_fail(vcpu, 5319 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5320 } 5321 5322 nested_release_vmcs12(vcpu); 5323 5324 /* 5325 * Load VMCS12 from guest memory since it is not already 5326 * cached. 
5327 */ 5328 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); 5329 kvm_vcpu_unmap(vcpu, &map, false); 5330 5331 set_current_vmptr(vmx, vmptr); 5332 } 5333 5334 return nested_vmx_succeed(vcpu); 5335 } 5336 5337 /* Emulate the VMPTRST instruction */ 5338 static int handle_vmptrst(struct kvm_vcpu *vcpu) 5339 { 5340 unsigned long exit_qual = vmx_get_exit_qual(vcpu); 5341 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5342 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; 5343 struct x86_exception e; 5344 gva_t gva; 5345 int r; 5346 5347 if (!nested_vmx_check_permission(vcpu)) 5348 return 1; 5349 5350 if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr))) 5351 return 1; 5352 5353 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, 5354 true, sizeof(gpa_t), &gva)) 5355 return 1; 5356 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ 5357 r = kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, 5358 sizeof(gpa_t), &e); 5359 if (r != X86EMUL_CONTINUE) 5360 return kvm_handle_memory_failure(vcpu, r, &e); 5361 5362 return nested_vmx_succeed(vcpu); 5363 } 5364 5365 /* Emulate the INVEPT instruction */ 5366 static int handle_invept(struct kvm_vcpu *vcpu) 5367 { 5368 struct vcpu_vmx *vmx = to_vmx(vcpu); 5369 u32 vmx_instruction_info, types; 5370 unsigned long type, roots_to_free; 5371 struct kvm_mmu *mmu; 5372 gva_t gva; 5373 struct x86_exception e; 5374 struct { 5375 u64 eptp, gpa; 5376 } operand; 5377 int i, r; 5378 5379 if (!(vmx->nested.msrs.secondary_ctls_high & 5380 SECONDARY_EXEC_ENABLE_EPT) || 5381 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { 5382 kvm_queue_exception(vcpu, UD_VECTOR); 5383 return 1; 5384 } 5385 5386 if (!nested_vmx_check_permission(vcpu)) 5387 return 1; 5388 5389 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5390 type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf); 5391 5392 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 5393 5394 if (type >= 32 || !(types & (1 << type))) 5395 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5396 5397 /* According to the Intel VMX instruction reference, the memory 5398 * operand is read even if it isn't needed (e.g., for type==global) 5399 */ 5400 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5401 vmx_instruction_info, false, sizeof(operand), &gva)) 5402 return 1; 5403 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5404 if (r != X86EMUL_CONTINUE) 5405 return kvm_handle_memory_failure(vcpu, r, &e); 5406 5407 /* 5408 * Nested EPT roots are always held through guest_mmu, 5409 * not root_mmu. 
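 *
 * (Aside on the "types" computation above, as an illustrative sketch:
 * the "& 6" keeps only bit 1 (single-context, type 1) and bit 2
 * (global, type 2), which is where the corresponding capability bits
 * of IA32_VMX_EPT_VPID_CAP land after the shift, assuming the usual
 * bit 25/26 layout:
 *
 *	types = (ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
 *	ok    = type < 32 && (types & (1 << type));
 *
 * so only those two extent types can ever pass the check.)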
5410 */ 5411 mmu = &vcpu->arch.guest_mmu; 5412 5413 switch (type) { 5414 case VMX_EPT_EXTENT_CONTEXT: 5415 if (!nested_vmx_check_eptp(vcpu, operand.eptp)) 5416 return nested_vmx_fail(vcpu, 5417 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5418 5419 roots_to_free = 0; 5420 if (nested_ept_root_matches(mmu->root_hpa, mmu->root_pgd, 5421 operand.eptp)) 5422 roots_to_free |= KVM_MMU_ROOT_CURRENT; 5423 5424 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 5425 if (nested_ept_root_matches(mmu->prev_roots[i].hpa, 5426 mmu->prev_roots[i].pgd, 5427 operand.eptp)) 5428 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 5429 } 5430 break; 5431 case VMX_EPT_EXTENT_GLOBAL: 5432 roots_to_free = KVM_MMU_ROOTS_ALL; 5433 break; 5434 default: 5435 BUG(); 5436 break; 5437 } 5438 5439 if (roots_to_free) 5440 kvm_mmu_free_roots(vcpu, mmu, roots_to_free); 5441 5442 return nested_vmx_succeed(vcpu); 5443 } 5444 5445 static int handle_invvpid(struct kvm_vcpu *vcpu) 5446 { 5447 struct vcpu_vmx *vmx = to_vmx(vcpu); 5448 u32 vmx_instruction_info; 5449 unsigned long type, types; 5450 gva_t gva; 5451 struct x86_exception e; 5452 struct { 5453 u64 vpid; 5454 u64 gla; 5455 } operand; 5456 u16 vpid02; 5457 int r; 5458 5459 if (!(vmx->nested.msrs.secondary_ctls_high & 5460 SECONDARY_EXEC_ENABLE_VPID) || 5461 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { 5462 kvm_queue_exception(vcpu, UD_VECTOR); 5463 return 1; 5464 } 5465 5466 if (!nested_vmx_check_permission(vcpu)) 5467 return 1; 5468 5469 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5470 type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf); 5471 5472 types = (vmx->nested.msrs.vpid_caps & 5473 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; 5474 5475 if (type >= 32 || !(types & (1 << type))) 5476 return nested_vmx_fail(vcpu, 5477 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5478 5479 /* according to the intel vmx instruction reference, the memory 5480 * operand is read even if it isn't needed (e.g., for type==global) 5481 */ 5482 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5483 vmx_instruction_info, false, sizeof(operand), &gva)) 5484 return 1; 5485 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5486 if (r != X86EMUL_CONTINUE) 5487 return kvm_handle_memory_failure(vcpu, r, &e); 5488 5489 if (operand.vpid >> 16) 5490 return nested_vmx_fail(vcpu, 5491 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5492 5493 vpid02 = nested_get_vpid02(vcpu); 5494 switch (type) { 5495 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: 5496 if (!operand.vpid || 5497 is_noncanonical_address(operand.gla, vcpu)) 5498 return nested_vmx_fail(vcpu, 5499 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5500 vpid_sync_vcpu_addr(vpid02, operand.gla); 5501 break; 5502 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 5503 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: 5504 if (!operand.vpid) 5505 return nested_vmx_fail(vcpu, 5506 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5507 vpid_sync_context(vpid02); 5508 break; 5509 case VMX_VPID_EXTENT_ALL_CONTEXT: 5510 vpid_sync_context(vpid02); 5511 break; 5512 default: 5513 WARN_ON_ONCE(1); 5514 return kvm_skip_emulated_instruction(vcpu); 5515 } 5516 5517 /* 5518 * Sync the shadow page tables if EPT is disabled, L1 is invalidating 5519 * linear mappings for L2 (tagged with L2's VPID). Free all guest 5520 * roots as VPIDs are not tracked in the MMU role. 5521 * 5522 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share 5523 * an MMU when EPT is disabled. 5524 * 5525 * TODO: sync only the affected SPTEs for INVDIVIDUAL_ADDR. 
5526 */ 5527 if (!enable_ept) 5528 kvm_mmu_free_guest_mode_roots(vcpu, &vcpu->arch.root_mmu); 5529 5530 return nested_vmx_succeed(vcpu); 5531 } 5532 5533 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, 5534 struct vmcs12 *vmcs12) 5535 { 5536 u32 index = kvm_rcx_read(vcpu); 5537 u64 new_eptp; 5538 5539 if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12))) 5540 return 1; 5541 if (index >= VMFUNC_EPTP_ENTRIES) 5542 return 1; 5543 5544 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, 5545 &new_eptp, index * 8, 8)) 5546 return 1; 5547 5548 /* 5549 * If the (L2) guest does a vmfunc to the currently 5550 * active ept pointer, we don't have to do anything else 5551 */ 5552 if (vmcs12->ept_pointer != new_eptp) { 5553 if (!nested_vmx_check_eptp(vcpu, new_eptp)) 5554 return 1; 5555 5556 vmcs12->ept_pointer = new_eptp; 5557 nested_ept_new_eptp(vcpu); 5558 5559 if (!nested_cpu_has_vpid(vmcs12)) 5560 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 5561 } 5562 5563 return 0; 5564 } 5565 5566 static int handle_vmfunc(struct kvm_vcpu *vcpu) 5567 { 5568 struct vcpu_vmx *vmx = to_vmx(vcpu); 5569 struct vmcs12 *vmcs12; 5570 u32 function = kvm_rax_read(vcpu); 5571 5572 /* 5573 * VMFUNC is only supported for nested guests, but we always enable the 5574 * secondary control for simplicity; for non-nested mode, fake that we 5575 * didn't by injecting #UD. 5576 */ 5577 if (!is_guest_mode(vcpu)) { 5578 kvm_queue_exception(vcpu, UD_VECTOR); 5579 return 1; 5580 } 5581 5582 vmcs12 = get_vmcs12(vcpu); 5583 5584 /* 5585 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC 5586 * is enabled in vmcs02 if and only if it's enabled in vmcs12. 5587 */ 5588 if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) { 5589 kvm_queue_exception(vcpu, UD_VECTOR); 5590 return 1; 5591 } 5592 5593 if (!(vmcs12->vm_function_control & BIT_ULL(function))) 5594 goto fail; 5595 5596 switch (function) { 5597 case 0: 5598 if (nested_vmx_eptp_switching(vcpu, vmcs12)) 5599 goto fail; 5600 break; 5601 default: 5602 goto fail; 5603 } 5604 return kvm_skip_emulated_instruction(vcpu); 5605 5606 fail: 5607 /* 5608 * This is effectively a reflected VM-Exit, as opposed to a synthesized 5609 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode 5610 * EXIT_REASON_VMFUNC as the exit reason. 5611 */ 5612 nested_vmx_vmexit(vcpu, vmx->exit_reason.full, 5613 vmx_get_intr_info(vcpu), 5614 vmx_get_exit_qual(vcpu)); 5615 return 1; 5616 } 5617 5618 /* 5619 * Return true if an IO instruction with the specified port and size should cause 5620 * a VM-exit into L1. 
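 *
 * Concrete (illustrative) example: a one-byte access to port 0x3f8
 * falls in io_bitmap_a (ports 0x0000-0x7fff), byte 0x3f8 / 8 == 0x7f,
 * bit 0x3f8 % 8 == 0, so the lookup below is roughly:
 *
 *	bitmap = vmcs12->io_bitmap_a + 0x3f8 / 8;
 *	exits  = guest_byte_at(bitmap) & BIT(0x3f8 & 7);
 *
 * where guest_byte_at() is a stand-in for the kvm_vcpu_read_guest()
 * call in the loop.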
5621 */ 5622 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, 5623 int size) 5624 { 5625 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5626 gpa_t bitmap, last_bitmap; 5627 u8 b; 5628 5629 last_bitmap = (gpa_t)-1; 5630 b = -1; 5631 5632 while (size > 0) { 5633 if (port < 0x8000) 5634 bitmap = vmcs12->io_bitmap_a; 5635 else if (port < 0x10000) 5636 bitmap = vmcs12->io_bitmap_b; 5637 else 5638 return true; 5639 bitmap += (port & 0x7fff) / 8; 5640 5641 if (last_bitmap != bitmap) 5642 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) 5643 return true; 5644 if (b & (1 << (port & 7))) 5645 return true; 5646 5647 port++; 5648 size--; 5649 last_bitmap = bitmap; 5650 } 5651 5652 return false; 5653 } 5654 5655 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, 5656 struct vmcs12 *vmcs12) 5657 { 5658 unsigned long exit_qualification; 5659 unsigned short port; 5660 int size; 5661 5662 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 5663 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); 5664 5665 exit_qualification = vmx_get_exit_qual(vcpu); 5666 5667 port = exit_qualification >> 16; 5668 size = (exit_qualification & 7) + 1; 5669 5670 return nested_vmx_check_io_bitmaps(vcpu, port, size); 5671 } 5672 5673 /* 5674 * Return 1 if we should exit from L2 to L1 to handle an MSR access, 5675 * rather than handle it ourselves in L0. I.e., check whether L1 expressed 5676 * disinterest in the current event (read or write a specific MSR) by using an 5677 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 5678 */ 5679 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, 5680 struct vmcs12 *vmcs12, 5681 union vmx_exit_reason exit_reason) 5682 { 5683 u32 msr_index = kvm_rcx_read(vcpu); 5684 gpa_t bitmap; 5685 5686 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 5687 return true; 5688 5689 /* 5690 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, 5691 * for the four combinations of read/write and low/high MSR numbers. 5692 * First we need to figure out which of the four to use: 5693 */ 5694 bitmap = vmcs12->msr_bitmap; 5695 if (exit_reason.basic == EXIT_REASON_MSR_WRITE) 5696 bitmap += 2048; 5697 if (msr_index >= 0xc0000000) { 5698 msr_index -= 0xc0000000; 5699 bitmap += 1024; 5700 } 5701 5702 /* Then read the msr_index'th bit from this bitmap: */ 5703 if (msr_index < 1024*8) { 5704 unsigned char b; 5705 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) 5706 return true; 5707 return 1 & (b >> (msr_index & 7)); 5708 } else 5709 return true; /* let L1 handle the wrong parameter */ 5710 } 5711 5712 /* 5713 * Return 1 if we should exit from L2 to L1 to handle a CR access exit, 5714 * rather than handle it ourselves in L0. I.e., check if L1 wanted to 5715 * intercept (via guest_host_mask etc.) the current event. 
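 *
 * The cr0/cr4 cases rely on the guest/host-mask idiom: a bit set in
 * crX_guest_host_mask means L1 owns that bit, so an exit is warranted
 * only if the new value differs from L1's read shadow in an owned bit.
 * A sketch, with a hypothetical helper name:
 *
 *	static inline bool l1_owned_bit_changed(u64 mask, u64 shadow, u64 val)
 *	{
 *		return mask & (val ^ shadow);
 *	}
 *
 * e.g. if L1 only owns CR0.TS, an L2 write that merely toggles CR0.MP
 * is handled entirely in L0.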
5716 */ 5717 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, 5718 struct vmcs12 *vmcs12) 5719 { 5720 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5721 int cr = exit_qualification & 15; 5722 int reg; 5723 unsigned long val; 5724 5725 switch ((exit_qualification >> 4) & 3) { 5726 case 0: /* mov to cr */ 5727 reg = (exit_qualification >> 8) & 15; 5728 val = kvm_register_read(vcpu, reg); 5729 switch (cr) { 5730 case 0: 5731 if (vmcs12->cr0_guest_host_mask & 5732 (val ^ vmcs12->cr0_read_shadow)) 5733 return true; 5734 break; 5735 case 3: 5736 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) 5737 return true; 5738 break; 5739 case 4: 5740 if (vmcs12->cr4_guest_host_mask & 5741 (vmcs12->cr4_read_shadow ^ val)) 5742 return true; 5743 break; 5744 case 8: 5745 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) 5746 return true; 5747 break; 5748 } 5749 break; 5750 case 2: /* clts */ 5751 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && 5752 (vmcs12->cr0_read_shadow & X86_CR0_TS)) 5753 return true; 5754 break; 5755 case 1: /* mov from cr */ 5756 switch (cr) { 5757 case 3: 5758 if (vmcs12->cpu_based_vm_exec_control & 5759 CPU_BASED_CR3_STORE_EXITING) 5760 return true; 5761 break; 5762 case 8: 5763 if (vmcs12->cpu_based_vm_exec_control & 5764 CPU_BASED_CR8_STORE_EXITING) 5765 return true; 5766 break; 5767 } 5768 break; 5769 case 3: /* lmsw */ 5770 /* 5771 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 5772 * cr0. Other attempted changes are ignored, with no exit. 5773 */ 5774 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 5775 if (vmcs12->cr0_guest_host_mask & 0xe & 5776 (val ^ vmcs12->cr0_read_shadow)) 5777 return true; 5778 if ((vmcs12->cr0_guest_host_mask & 0x1) && 5779 !(vmcs12->cr0_read_shadow & 0x1) && 5780 (val & 0x1)) 5781 return true; 5782 break; 5783 } 5784 return false; 5785 } 5786 5787 static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu, 5788 struct vmcs12 *vmcs12) 5789 { 5790 u32 encls_leaf; 5791 5792 if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) || 5793 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING)) 5794 return false; 5795 5796 encls_leaf = kvm_rax_read(vcpu); 5797 if (encls_leaf > 62) 5798 encls_leaf = 63; 5799 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf); 5800 } 5801 5802 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, 5803 struct vmcs12 *vmcs12, gpa_t bitmap) 5804 { 5805 u32 vmx_instruction_info; 5806 unsigned long field; 5807 u8 b; 5808 5809 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 5810 return true; 5811 5812 /* Decode instruction info and find the field to access */ 5813 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5814 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 5815 5816 /* Out-of-range fields always cause a VM exit from L2 to L1 */ 5817 if (field >> 15) 5818 return true; 5819 5820 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) 5821 return true; 5822 5823 return 1 & (b >> (field & 7)); 5824 } 5825 5826 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12) 5827 { 5828 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field; 5829 5830 if (nested_cpu_has_mtf(vmcs12)) 5831 return true; 5832 5833 /* 5834 * An MTF VM-exit may be injected into the guest by setting the 5835 * interruption-type to 7 (other event) and the vector field to 0. Such 5836 * is the case regardless of the 'monitor trap flag' VM-execution 5837 * control. 
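 *
 * Numerically (illustrative, assuming the usual interruption-info
 * layout with the valid bit at bit 31 and the type in bits 10:8):
 *
 *	INTR_INFO_VALID_MASK  == 0x80000000
 *	INTR_TYPE_OTHER_EVENT == 7 << 8 == 0x700
 *
 * so a pending synthetic MTF shows up as vm_entry_intr_info_field ==
 * 0x80000700 with a zero vector, which is exactly what the check below
 * matches.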
5838 */ 5839 return entry_intr_info == (INTR_INFO_VALID_MASK 5840 | INTR_TYPE_OTHER_EVENT); 5841 } 5842 5843 /* 5844 * Return true if L0 wants to handle an exit from L2 regardless of whether or not 5845 * L1 wants the exit. Only call this when in is_guest_mode (L2). 5846 */ 5847 static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, 5848 union vmx_exit_reason exit_reason) 5849 { 5850 u32 intr_info; 5851 5852 switch ((u16)exit_reason.basic) { 5853 case EXIT_REASON_EXCEPTION_NMI: 5854 intr_info = vmx_get_intr_info(vcpu); 5855 if (is_nmi(intr_info)) 5856 return true; 5857 else if (is_page_fault(intr_info)) 5858 return vcpu->arch.apf.host_apf_flags || 5859 vmx_need_pf_intercept(vcpu); 5860 else if (is_debug(intr_info) && 5861 vcpu->guest_debug & 5862 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 5863 return true; 5864 else if (is_breakpoint(intr_info) && 5865 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 5866 return true; 5867 else if (is_alignment_check(intr_info) && 5868 !vmx_guest_inject_ac(vcpu)) 5869 return true; 5870 return false; 5871 case EXIT_REASON_EXTERNAL_INTERRUPT: 5872 return true; 5873 case EXIT_REASON_MCE_DURING_VMENTRY: 5874 return true; 5875 case EXIT_REASON_EPT_VIOLATION: 5876 /* 5877 * L0 always deals with the EPT violation. If nested EPT is 5878 * used, and the nested mmu code discovers that the address is 5879 * missing in the guest EPT table (EPT12), the EPT violation 5880 * will be injected with nested_ept_inject_page_fault() 5881 */ 5882 return true; 5883 case EXIT_REASON_EPT_MISCONFIG: 5884 /* 5885 * L2 never uses directly L1's EPT, but rather L0's own EPT 5886 * table (shadow on EPT) or a merged EPT table that L0 built 5887 * (EPT on EPT). So any problems with the structure of the 5888 * table is L0's fault. 5889 */ 5890 return true; 5891 case EXIT_REASON_PREEMPTION_TIMER: 5892 return true; 5893 case EXIT_REASON_PML_FULL: 5894 /* 5895 * PML is emulated for an L1 VMM and should never be enabled in 5896 * vmcs02, always "handle" PML_FULL by exiting to userspace. 5897 */ 5898 return true; 5899 case EXIT_REASON_VMFUNC: 5900 /* VM functions are emulated through L2->L0 vmexits. */ 5901 return true; 5902 default: 5903 break; 5904 } 5905 return false; 5906 } 5907 5908 /* 5909 * Return 1 if L1 wants to intercept an exit from L2. Only call this when in 5910 * is_guest_mode (L2). 
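 *
 * For ordinary exceptions this reduces to a bit test on the vector,
 * e.g. (sketch) a #BP raised in L2 is reflected iff L1 set bit 3 of
 * its exception bitmap:
 *
 *	vector = intr_info & INTR_INFO_VECTOR_MASK;	// #BP -> 3
 *	wants  = vmcs12->exception_bitmap & (1u << vector);
 *
 * with NMIs and page faults short-circuited to "reflect" by the
 * branches just ahead of that test.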
5911 */ 5912 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, 5913 union vmx_exit_reason exit_reason) 5914 { 5915 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5916 u32 intr_info; 5917 5918 switch ((u16)exit_reason.basic) { 5919 case EXIT_REASON_EXCEPTION_NMI: 5920 intr_info = vmx_get_intr_info(vcpu); 5921 if (is_nmi(intr_info)) 5922 return true; 5923 else if (is_page_fault(intr_info)) 5924 return true; 5925 return vmcs12->exception_bitmap & 5926 (1u << (intr_info & INTR_INFO_VECTOR_MASK)); 5927 case EXIT_REASON_EXTERNAL_INTERRUPT: 5928 return nested_exit_on_intr(vcpu); 5929 case EXIT_REASON_TRIPLE_FAULT: 5930 return true; 5931 case EXIT_REASON_INTERRUPT_WINDOW: 5932 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING); 5933 case EXIT_REASON_NMI_WINDOW: 5934 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING); 5935 case EXIT_REASON_TASK_SWITCH: 5936 return true; 5937 case EXIT_REASON_CPUID: 5938 return true; 5939 case EXIT_REASON_HLT: 5940 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); 5941 case EXIT_REASON_INVD: 5942 return true; 5943 case EXIT_REASON_INVLPG: 5944 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 5945 case EXIT_REASON_RDPMC: 5946 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); 5947 case EXIT_REASON_RDRAND: 5948 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); 5949 case EXIT_REASON_RDSEED: 5950 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); 5951 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: 5952 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); 5953 case EXIT_REASON_VMREAD: 5954 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5955 vmcs12->vmread_bitmap); 5956 case EXIT_REASON_VMWRITE: 5957 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5958 vmcs12->vmwrite_bitmap); 5959 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: 5960 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: 5961 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: 5962 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 5963 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: 5964 /* 5965 * VMX instructions trap unconditionally. This allows L1 to 5966 * emulate them for its L2 guest, i.e., allows 3-level nesting! 
5967 */ 5968 return true; 5969 case EXIT_REASON_CR_ACCESS: 5970 return nested_vmx_exit_handled_cr(vcpu, vmcs12); 5971 case EXIT_REASON_DR_ACCESS: 5972 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); 5973 case EXIT_REASON_IO_INSTRUCTION: 5974 return nested_vmx_exit_handled_io(vcpu, vmcs12); 5975 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: 5976 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); 5977 case EXIT_REASON_MSR_READ: 5978 case EXIT_REASON_MSR_WRITE: 5979 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); 5980 case EXIT_REASON_INVALID_STATE: 5981 return true; 5982 case EXIT_REASON_MWAIT_INSTRUCTION: 5983 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); 5984 case EXIT_REASON_MONITOR_TRAP_FLAG: 5985 return nested_vmx_exit_handled_mtf(vmcs12); 5986 case EXIT_REASON_MONITOR_INSTRUCTION: 5987 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); 5988 case EXIT_REASON_PAUSE_INSTRUCTION: 5989 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || 5990 nested_cpu_has2(vmcs12, 5991 SECONDARY_EXEC_PAUSE_LOOP_EXITING); 5992 case EXIT_REASON_MCE_DURING_VMENTRY: 5993 return true; 5994 case EXIT_REASON_TPR_BELOW_THRESHOLD: 5995 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); 5996 case EXIT_REASON_APIC_ACCESS: 5997 case EXIT_REASON_APIC_WRITE: 5998 case EXIT_REASON_EOI_INDUCED: 5999 /* 6000 * The controls for "virtualize APIC accesses," "APIC- 6001 * register virtualization," and "virtual-interrupt 6002 * delivery" only come from vmcs12. 6003 */ 6004 return true; 6005 case EXIT_REASON_INVPCID: 6006 return 6007 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && 6008 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 6009 case EXIT_REASON_WBINVD: 6010 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); 6011 case EXIT_REASON_XSETBV: 6012 return true; 6013 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: 6014 /* 6015 * This should never happen, since it is not possible to 6016 * set XSS to a non-zero value---neither in L1 nor in L2. 6017 * If if it were, XSS would have to be checked against 6018 * the XSS exit bitmap in vmcs12. 6019 */ 6020 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); 6021 case EXIT_REASON_UMWAIT: 6022 case EXIT_REASON_TPAUSE: 6023 return nested_cpu_has2(vmcs12, 6024 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE); 6025 case EXIT_REASON_ENCLS: 6026 return nested_vmx_exit_handled_encls(vcpu, vmcs12); 6027 default: 6028 return true; 6029 } 6030 } 6031 6032 /* 6033 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was 6034 * reflected into L1. 6035 */ 6036 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) 6037 { 6038 struct vcpu_vmx *vmx = to_vmx(vcpu); 6039 union vmx_exit_reason exit_reason = vmx->exit_reason; 6040 unsigned long exit_qual; 6041 u32 exit_intr_info; 6042 6043 WARN_ON_ONCE(vmx->nested.nested_run_pending); 6044 6045 /* 6046 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM 6047 * has already loaded L2's state. 6048 */ 6049 if (unlikely(vmx->fail)) { 6050 trace_kvm_nested_vmenter_failed( 6051 "hardware VM-instruction error: ", 6052 vmcs_read32(VM_INSTRUCTION_ERROR)); 6053 exit_intr_info = 0; 6054 exit_qual = 0; 6055 goto reflect_vmexit; 6056 } 6057 6058 trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX); 6059 6060 /* If L0 (KVM) wants the exit, it trumps L1's desires. */ 6061 if (nested_vmx_l0_wants_exit(vcpu, exit_reason)) 6062 return false; 6063 6064 /* If L1 doesn't want the exit, handle it in L0. 
*/ 6065 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason)) 6066 return false; 6067 6068 /* 6069 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For 6070 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would 6071 * need to be synthesized by querying the in-kernel LAPIC, but external 6072 * interrupts are never reflected to L1 so it's a non-issue. 6073 */ 6074 exit_intr_info = vmx_get_intr_info(vcpu); 6075 if (is_exception_with_error_code(exit_intr_info)) { 6076 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6077 6078 vmcs12->vm_exit_intr_error_code = 6079 vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 6080 } 6081 exit_qual = vmx_get_exit_qual(vcpu); 6082 6083 reflect_vmexit: 6084 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual); 6085 return true; 6086 } 6087 6088 static int vmx_get_nested_state(struct kvm_vcpu *vcpu, 6089 struct kvm_nested_state __user *user_kvm_nested_state, 6090 u32 user_data_size) 6091 { 6092 struct vcpu_vmx *vmx; 6093 struct vmcs12 *vmcs12; 6094 struct kvm_nested_state kvm_state = { 6095 .flags = 0, 6096 .format = KVM_STATE_NESTED_FORMAT_VMX, 6097 .size = sizeof(kvm_state), 6098 .hdr.vmx.flags = 0, 6099 .hdr.vmx.vmxon_pa = -1ull, 6100 .hdr.vmx.vmcs12_pa = -1ull, 6101 .hdr.vmx.preemption_timer_deadline = 0, 6102 }; 6103 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6104 &user_kvm_nested_state->data.vmx[0]; 6105 6106 if (!vcpu) 6107 return kvm_state.size + sizeof(*user_vmx_nested_state); 6108 6109 vmx = to_vmx(vcpu); 6110 vmcs12 = get_vmcs12(vcpu); 6111 6112 if (nested_vmx_allowed(vcpu) && 6113 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { 6114 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; 6115 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; 6116 6117 if (vmx_has_valid_vmcs12(vcpu)) { 6118 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); 6119 6120 /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */ 6121 if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID) 6122 kvm_state.flags |= KVM_STATE_NESTED_EVMCS; 6123 6124 if (is_guest_mode(vcpu) && 6125 nested_cpu_has_shadow_vmcs(vmcs12) && 6126 vmcs12->vmcs_link_pointer != -1ull) 6127 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); 6128 } 6129 6130 if (vmx->nested.smm.vmxon) 6131 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; 6132 6133 if (vmx->nested.smm.guest_mode) 6134 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; 6135 6136 if (is_guest_mode(vcpu)) { 6137 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; 6138 6139 if (vmx->nested.nested_run_pending) 6140 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; 6141 6142 if (vmx->nested.mtf_pending) 6143 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING; 6144 6145 if (nested_cpu_has_preemption_timer(vmcs12) && 6146 vmx->nested.has_preemption_timer_deadline) { 6147 kvm_state.hdr.vmx.flags |= 6148 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE; 6149 kvm_state.hdr.vmx.preemption_timer_deadline = 6150 vmx->nested.preemption_timer_deadline; 6151 } 6152 } 6153 } 6154 6155 if (user_data_size < kvm_state.size) 6156 goto out; 6157 6158 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) 6159 return -EFAULT; 6160 6161 if (!vmx_has_valid_vmcs12(vcpu)) 6162 goto out; 6163 6164 /* 6165 * When running L2, the authoritative vmcs12 state is in the 6166 * vmcs02. 
When running L1, the authoritative vmcs12 state is 6167 * in the shadow or enlightened vmcs linked to vmcs01, unless 6168 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative 6169 * vmcs12 state is in the vmcs12 already. 6170 */ 6171 if (is_guest_mode(vcpu)) { 6172 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 6173 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 6174 } else { 6175 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 6176 if (!vmx->nested.need_vmcs12_to_shadow_sync) { 6177 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) 6178 /* 6179 * L1 hypervisor is not obliged to keep eVMCS 6180 * clean fields data always up-to-date while 6181 * not in guest mode, 'hv_clean_fields' is only 6182 * supposed to be actual upon vmentry so we need 6183 * to ignore it here and do full copy. 6184 */ 6185 copy_enlightened_to_vmcs12(vmx, 0); 6186 else if (enable_shadow_vmcs) 6187 copy_shadow_to_vmcs12(vmx); 6188 } 6189 } 6190 6191 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); 6192 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE); 6193 6194 /* 6195 * Copy over the full allocated size of vmcs12 rather than just the size 6196 * of the struct. 6197 */ 6198 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE)) 6199 return -EFAULT; 6200 6201 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6202 vmcs12->vmcs_link_pointer != -1ull) { 6203 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12, 6204 get_shadow_vmcs12(vcpu), VMCS12_SIZE)) 6205 return -EFAULT; 6206 } 6207 out: 6208 return kvm_state.size; 6209 } 6210 6211 /* 6212 * Forcibly leave nested mode in order to be able to reset the VCPU later on. 6213 */ 6214 void vmx_leave_nested(struct kvm_vcpu *vcpu) 6215 { 6216 if (is_guest_mode(vcpu)) { 6217 to_vmx(vcpu)->nested.nested_run_pending = 0; 6218 nested_vmx_vmexit(vcpu, -1, 0, 0); 6219 } 6220 free_nested(vcpu); 6221 } 6222 6223 static int vmx_set_nested_state(struct kvm_vcpu *vcpu, 6224 struct kvm_nested_state __user *user_kvm_nested_state, 6225 struct kvm_nested_state *kvm_state) 6226 { 6227 struct vcpu_vmx *vmx = to_vmx(vcpu); 6228 struct vmcs12 *vmcs12; 6229 enum vm_entry_failure_code ignored; 6230 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6231 &user_kvm_nested_state->data.vmx[0]; 6232 int ret; 6233 6234 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX) 6235 return -EINVAL; 6236 6237 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) { 6238 if (kvm_state->hdr.vmx.smm.flags) 6239 return -EINVAL; 6240 6241 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) 6242 return -EINVAL; 6243 6244 /* 6245 * KVM_STATE_NESTED_EVMCS used to signal that KVM should 6246 * enable eVMCS capability on vCPU. However, since then 6247 * code was changed such that flag signals vmcs12 should 6248 * be copied into eVMCS in guest memory. 6249 * 6250 * To preserve backwards compatability, allow user 6251 * to set this flag even when there is no VMXON region. 
6252 */ 6253 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) 6254 return -EINVAL; 6255 } else { 6256 if (!nested_vmx_allowed(vcpu)) 6257 return -EINVAL; 6258 6259 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) 6260 return -EINVAL; 6261 } 6262 6263 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6264 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6265 return -EINVAL; 6266 6267 if (kvm_state->hdr.vmx.smm.flags & 6268 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) 6269 return -EINVAL; 6270 6271 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) 6272 return -EINVAL; 6273 6274 /* 6275 * SMM temporarily disables VMX, so we cannot be in guest mode, 6276 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags 6277 * must be zero. 6278 */ 6279 if (is_smm(vcpu) ? 6280 (kvm_state->flags & 6281 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING)) 6282 : kvm_state->hdr.vmx.smm.flags) 6283 return -EINVAL; 6284 6285 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6286 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) 6287 return -EINVAL; 6288 6289 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && 6290 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) 6291 return -EINVAL; 6292 6293 vmx_leave_nested(vcpu); 6294 6295 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) 6296 return 0; 6297 6298 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; 6299 ret = enter_vmx_operation(vcpu); 6300 if (ret) 6301 return ret; 6302 6303 /* Empty 'VMXON' state is permitted if no VMCS loaded */ 6304 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) { 6305 /* See vmx_has_valid_vmcs12. */ 6306 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || 6307 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || 6308 (kvm_state->hdr.vmx.vmcs12_pa != -1ull)) 6309 return -EINVAL; 6310 else 6311 return 0; 6312 } 6313 6314 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) { 6315 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || 6316 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) 6317 return -EINVAL; 6318 6319 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); 6320 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { 6321 /* 6322 * nested_vmx_handle_enlightened_vmptrld() cannot be called 6323 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be 6324 * restored yet. EVMCS will be mapped from 6325 * nested_get_vmcs12_pages(). 
6326 */ 6327 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING; 6328 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 6329 } else { 6330 return -EINVAL; 6331 } 6332 6333 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { 6334 vmx->nested.smm.vmxon = true; 6335 vmx->nested.vmxon = false; 6336 6337 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) 6338 vmx->nested.smm.guest_mode = true; 6339 } 6340 6341 vmcs12 = get_vmcs12(vcpu); 6342 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12))) 6343 return -EFAULT; 6344 6345 if (vmcs12->hdr.revision_id != VMCS12_REVISION) 6346 return -EINVAL; 6347 6348 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6349 return 0; 6350 6351 vmx->nested.nested_run_pending = 6352 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); 6353 6354 vmx->nested.mtf_pending = 6355 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); 6356 6357 ret = -EINVAL; 6358 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6359 vmcs12->vmcs_link_pointer != -1ull) { 6360 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 6361 6362 if (kvm_state->size < 6363 sizeof(*kvm_state) + 6364 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) 6365 goto error_guest_mode; 6366 6367 if (copy_from_user(shadow_vmcs12, 6368 user_vmx_nested_state->shadow_vmcs12, 6369 sizeof(*shadow_vmcs12))) { 6370 ret = -EFAULT; 6371 goto error_guest_mode; 6372 } 6373 6374 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 6375 !shadow_vmcs12->hdr.shadow_vmcs) 6376 goto error_guest_mode; 6377 } 6378 6379 vmx->nested.has_preemption_timer_deadline = false; 6380 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { 6381 vmx->nested.has_preemption_timer_deadline = true; 6382 vmx->nested.preemption_timer_deadline = 6383 kvm_state->hdr.vmx.preemption_timer_deadline; 6384 } 6385 6386 if (nested_vmx_check_controls(vcpu, vmcs12) || 6387 nested_vmx_check_host_state(vcpu, vmcs12) || 6388 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored)) 6389 goto error_guest_mode; 6390 6391 vmx->nested.dirty_vmcs12 = true; 6392 ret = nested_vmx_enter_non_root_mode(vcpu, false); 6393 if (ret) 6394 goto error_guest_mode; 6395 6396 return 0; 6397 6398 error_guest_mode: 6399 vmx->nested.nested_run_pending = 0; 6400 return ret; 6401 } 6402 6403 void nested_vmx_set_vmcs_shadowing_bitmap(void) 6404 { 6405 if (enable_shadow_vmcs) { 6406 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); 6407 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); 6408 } 6409 } 6410 6411 /* 6412 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo 6413 * that madness to get the encoding for comparison. 6414 */ 6415 #define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10))) 6416 6417 static u64 nested_vmx_calc_vmcs_enum_msr(void) 6418 { 6419 /* 6420 * Note these are the so called "index" of the VMCS field encoding, not 6421 * the index into vmcs12. 6422 */ 6423 unsigned int max_idx, idx; 6424 int i; 6425 6426 /* 6427 * For better or worse, KVM allows VMREAD/VMWRITE to all fields in 6428 * vmcs12, regardless of whether or not the associated feature is 6429 * exposed to L1. Simply find the field with the highest index. 6430 */ 6431 max_idx = 0; 6432 for (i = 0; i < nr_vmcs12_fields; i++) { 6433 /* The vmcs12 table is very, very sparsely populated. 
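 *
 * Worked example of the rotation (illustrative): GUEST_ES_SELECTOR has
 * encoding 0x0800, so its slot in the offset table is the encoding
 * rotated left by 6, i.e. index 0x0002, and the macro undoes that:
 *
 *	VMCS12_IDX_TO_ENC(0x0002) == (0x0002 >> 6) | (u16)(0x0002 << 10)
 *	                          == 0x0800
 *
 * after which vmcs_field_index() pulls out the encoding's index bits
 * (bits 9:1, hence the VMCS_FIELD_INDEX_SHIFT below).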
*/ 6434 if (!vmcs_field_to_offset_table[i]) 6435 continue; 6436 6437 idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i)); 6438 if (idx > max_idx) 6439 max_idx = idx; 6440 } 6441 6442 return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT; 6443 } 6444 6445 /* 6446 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be 6447 * returned for the various VMX controls MSRs when nested VMX is enabled. 6448 * The same values should also be used to verify that vmcs12 control fields are 6449 * valid during nested entry from L1 to L2. 6450 * Each of these control msrs has a low and high 32-bit half: A low bit is on 6451 * if the corresponding bit in the (32-bit) control field *must* be on, and a 6452 * bit in the high half is on if the corresponding bit in the control field 6453 * may be on. See also vmx_control_verify(). 6454 */ 6455 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) 6456 { 6457 /* 6458 * Note that as a general rule, the high half of the MSRs (bits in 6459 * the control fields which may be 1) should be initialized by the 6460 * intersection of the underlying hardware's MSR (i.e., features which 6461 * can be supported) and the list of features we want to expose - 6462 * because they are known to be properly supported in our code. 6463 * Also, usually, the low half of the MSRs (bits which must be 1) can 6464 * be set to 0, meaning that L1 may turn off any of these bits. The 6465 * reason is that if one of these bits is necessary, it will appear 6466 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control 6467 * fields of vmcs01 and vmcs02, will turn these bits off - and 6468 * nested_vmx_l1_wants_exit() will not pass related exits to L1. 6469 * These rules have exceptions below. 6470 */ 6471 6472 /* pin-based controls */ 6473 rdmsr(MSR_IA32_VMX_PINBASED_CTLS, 6474 msrs->pinbased_ctls_low, 6475 msrs->pinbased_ctls_high); 6476 msrs->pinbased_ctls_low |= 6477 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6478 msrs->pinbased_ctls_high &= 6479 PIN_BASED_EXT_INTR_MASK | 6480 PIN_BASED_NMI_EXITING | 6481 PIN_BASED_VIRTUAL_NMIS | 6482 (enable_apicv ? PIN_BASED_POSTED_INTR : 0); 6483 msrs->pinbased_ctls_high |= 6484 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6485 PIN_BASED_VMX_PREEMPTION_TIMER; 6486 6487 /* exit controls */ 6488 rdmsr(MSR_IA32_VMX_EXIT_CTLS, 6489 msrs->exit_ctls_low, 6490 msrs->exit_ctls_high); 6491 msrs->exit_ctls_low = 6492 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 6493 6494 msrs->exit_ctls_high &= 6495 #ifdef CONFIG_X86_64 6496 VM_EXIT_HOST_ADDR_SPACE_SIZE | 6497 #endif 6498 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT | 6499 VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 6500 msrs->exit_ctls_high |= 6501 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | 6502 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 6503 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; 6504 6505 /* We support free control of debug control saving. */ 6506 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; 6507 6508 /* entry controls */ 6509 rdmsr(MSR_IA32_VMX_ENTRY_CTLS, 6510 msrs->entry_ctls_low, 6511 msrs->entry_ctls_high); 6512 msrs->entry_ctls_low = 6513 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 6514 msrs->entry_ctls_high &= 6515 #ifdef CONFIG_X86_64 6516 VM_ENTRY_IA32E_MODE | 6517 #endif 6518 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS | 6519 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 6520 msrs->entry_ctls_high |= 6521 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); 6522 6523 /* We support free control of debug control loading. 
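 *
 * (As with every low/high pair set up here, a vmcs12 control value is
 * ultimately accepted iff all must-be-1 bits are set and no bit outside
 * the may-be-1 mask is -- a sketch mirroring vmx_control_verify():
 *
 *	bool ctl_ok(u32 val, u32 low, u32 high)
 *	{
 *		return (val & low) == low && (val & ~high) == 0;
 *	}
 *
 * which is why clearing a bit in a *_ctls_low half, as done below for
 * the debug controls, is what grants L1 the freedom to leave it off.)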
*/ 6524 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; 6525 6526 /* cpu-based controls */ 6527 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, 6528 msrs->procbased_ctls_low, 6529 msrs->procbased_ctls_high); 6530 msrs->procbased_ctls_low = 6531 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6532 msrs->procbased_ctls_high &= 6533 CPU_BASED_INTR_WINDOW_EXITING | 6534 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING | 6535 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | 6536 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | 6537 CPU_BASED_CR3_STORE_EXITING | 6538 #ifdef CONFIG_X86_64 6539 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | 6540 #endif 6541 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | 6542 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | 6543 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | 6544 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | 6545 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 6546 /* 6547 * We can allow some features even when not supported by the 6548 * hardware. For example, L1 can specify an MSR bitmap - and we 6549 * can use it to avoid exits to L1 - even when L0 runs L2 6550 * without MSR bitmaps. 6551 */ 6552 msrs->procbased_ctls_high |= 6553 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6554 CPU_BASED_USE_MSR_BITMAPS; 6555 6556 /* We support free control of CR3 access interception. */ 6557 msrs->procbased_ctls_low &= 6558 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); 6559 6560 /* 6561 * secondary cpu-based controls. Do not include those that 6562 * depend on CPUID bits, they are added later by 6563 * vmx_vcpu_after_set_cpuid. 6564 */ 6565 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) 6566 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 6567 msrs->secondary_ctls_low, 6568 msrs->secondary_ctls_high); 6569 6570 msrs->secondary_ctls_low = 0; 6571 msrs->secondary_ctls_high &= 6572 SECONDARY_EXEC_DESC | 6573 SECONDARY_EXEC_ENABLE_RDTSCP | 6574 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 6575 SECONDARY_EXEC_WBINVD_EXITING | 6576 SECONDARY_EXEC_APIC_REGISTER_VIRT | 6577 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 6578 SECONDARY_EXEC_RDRAND_EXITING | 6579 SECONDARY_EXEC_ENABLE_INVPCID | 6580 SECONDARY_EXEC_RDSEED_EXITING | 6581 SECONDARY_EXEC_XSAVES | 6582 SECONDARY_EXEC_TSC_SCALING; 6583 6584 /* 6585 * We can emulate "VMCS shadowing," even if the hardware 6586 * doesn't support it. 
6587 */ 6588 msrs->secondary_ctls_high |= 6589 SECONDARY_EXEC_SHADOW_VMCS; 6590 6591 if (enable_ept) { 6592 /* nested EPT: emulate EPT also to L1 */ 6593 msrs->secondary_ctls_high |= 6594 SECONDARY_EXEC_ENABLE_EPT; 6595 msrs->ept_caps = 6596 VMX_EPT_PAGE_WALK_4_BIT | 6597 VMX_EPT_PAGE_WALK_5_BIT | 6598 VMX_EPTP_WB_BIT | 6599 VMX_EPT_INVEPT_BIT | 6600 VMX_EPT_EXECUTE_ONLY_BIT; 6601 6602 msrs->ept_caps &= ept_caps; 6603 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | 6604 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | 6605 VMX_EPT_1GB_PAGE_BIT; 6606 if (enable_ept_ad_bits) { 6607 msrs->secondary_ctls_high |= 6608 SECONDARY_EXEC_ENABLE_PML; 6609 msrs->ept_caps |= VMX_EPT_AD_BIT; 6610 } 6611 } 6612 6613 if (cpu_has_vmx_vmfunc()) { 6614 msrs->secondary_ctls_high |= 6615 SECONDARY_EXEC_ENABLE_VMFUNC; 6616 /* 6617 * Advertise EPTP switching unconditionally 6618 * since we emulate it 6619 */ 6620 if (enable_ept) 6621 msrs->vmfunc_controls = 6622 VMX_VMFUNC_EPTP_SWITCHING; 6623 } 6624 6625 /* 6626 * Old versions of KVM use the single-context version without 6627 * checking for support, so declare that it is supported even 6628 * though it is treated as global context. The alternative is 6629 * not failing the single-context invvpid, and it is worse. 6630 */ 6631 if (enable_vpid) { 6632 msrs->secondary_ctls_high |= 6633 SECONDARY_EXEC_ENABLE_VPID; 6634 msrs->vpid_caps = VMX_VPID_INVVPID_BIT | 6635 VMX_VPID_EXTENT_SUPPORTED_MASK; 6636 } 6637 6638 if (enable_unrestricted_guest) 6639 msrs->secondary_ctls_high |= 6640 SECONDARY_EXEC_UNRESTRICTED_GUEST; 6641 6642 if (flexpriority_enabled) 6643 msrs->secondary_ctls_high |= 6644 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 6645 6646 if (enable_sgx) 6647 msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING; 6648 6649 /* miscellaneous data */ 6650 rdmsr(MSR_IA32_VMX_MISC, 6651 msrs->misc_low, 6652 msrs->misc_high); 6653 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA; 6654 msrs->misc_low |= 6655 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | 6656 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | 6657 VMX_MISC_ACTIVITY_HLT | 6658 VMX_MISC_ACTIVITY_WAIT_SIPI; 6659 msrs->misc_high = 0; 6660 6661 /* 6662 * This MSR reports some information about VMX support. We 6663 * should return information about the VMX we emulate for the 6664 * guest, and the VMCS structure we give it - not about the 6665 * VMX support of the underlying hardware. 6666 */ 6667 msrs->basic = 6668 VMCS12_REVISION | 6669 VMX_BASIC_TRUE_CTLS | 6670 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | 6671 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); 6672 6673 if (cpu_has_vmx_basic_inout()) 6674 msrs->basic |= VMX_BASIC_INOUT; 6675 6676 /* 6677 * These MSRs specify bits which the guest must keep fixed on 6678 * while L1 is in VMXON mode (in L1's root mode, or running an L2). 6679 * We picked the standard core2 setting. 6680 */ 6681 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) 6682 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE 6683 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; 6684 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; 6685 6686 /* These MSRs specify bits which the guest must keep fixed off. 
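 *
 * Taken together with the FIXED1 MSRs read below (bits clear there may
 * never be set by the guest), a CR0/CR4 value is acceptable under VMX
 * roughly when:
 *
 *	ok = (cr & fixed0) == fixed0 && (cr & ~fixed1) == 0;
 *
 * so with the constants chosen above, CR0 must keep PE|NE|PG
 * (0x80000021) set and CR4 must keep VMXE (0x2000) set.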
 */
        rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
        rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);

        msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
}

void nested_vmx_hardware_unsetup(void)
{
        int i;

        if (enable_shadow_vmcs) {
                for (i = 0; i < VMX_BITMAP_NR; i++)
                        free_page((unsigned long)vmx_bitmap[i]);
        }
}

__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
{
        int i;

        if (!cpu_has_vmx_shadow_vmcs())
                enable_shadow_vmcs = 0;
        if (enable_shadow_vmcs) {
                for (i = 0; i < VMX_BITMAP_NR; i++) {
                        /*
                         * The vmx_bitmap is not tied to a VM and so should
                         * not be charged to a memcg.
                         */
                        vmx_bitmap[i] = (unsigned long *)
                                __get_free_page(GFP_KERNEL);
                        if (!vmx_bitmap[i]) {
                                nested_vmx_hardware_unsetup();
                                return -ENOMEM;
                        }
                }

                init_vmcs_shadow_fields();
        }

        exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
        exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
        exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
        exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
        exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
        exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
        exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
        exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
        exit_handlers[EXIT_REASON_VMON] = handle_vmon;
        exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
        exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
        exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;

        return 0;
}

struct kvm_x86_nested_ops vmx_nested_ops = {
        .check_events = vmx_check_nested_events,
        .hv_timer_pending = nested_vmx_preemption_timer_pending,
        .triple_fault = nested_vmx_triple_fault,
        .get_state = vmx_get_nested_state,
        .set_state = vmx_set_nested_state,
        .get_nested_state_pages = vmx_get_nested_state_pages,
        .write_log_dirty = nested_vmx_write_pml_buffer,
        .enable_evmcs = nested_enable_evmcs,
        .get_evmcs_version = nested_get_evmcs_version,
};