1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/objtool.h> 4 #include <linux/percpu.h> 5 6 #include <asm/debugreg.h> 7 #include <asm/mmu_context.h> 8 9 #include "cpuid.h" 10 #include "hyperv.h" 11 #include "mmu.h" 12 #include "nested.h" 13 #include "pmu.h" 14 #include "trace.h" 15 #include "vmx.h" 16 #include "x86.h" 17 18 static bool __read_mostly enable_shadow_vmcs = 1; 19 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); 20 21 static bool __read_mostly nested_early_check = 0; 22 module_param(nested_early_check, bool, S_IRUGO); 23 24 #define CC(consistency_check) \ 25 ({ \ 26 bool failed = (consistency_check); \ 27 if (failed) \ 28 trace_kvm_nested_vmenter_failed(#consistency_check, 0); \ 29 failed; \ 30 }) 31 32 /* 33 * Hyper-V requires all of these, so mark them as supported even though 34 * they are just treated the same as all-context. 35 */ 36 #define VMX_VPID_EXTENT_SUPPORTED_MASK \ 37 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \ 38 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \ 39 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \ 40 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT) 41 42 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 43 44 enum { 45 VMX_VMREAD_BITMAP, 46 VMX_VMWRITE_BITMAP, 47 VMX_BITMAP_NR 48 }; 49 static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; 50 51 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) 52 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) 53 54 struct shadow_vmcs_field { 55 u16 encoding; 56 u16 offset; 57 }; 58 static struct shadow_vmcs_field shadow_read_only_fields[] = { 59 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) }, 60 #include "vmcs_shadow_fields.h" 61 }; 62 static int max_shadow_read_only_fields = 63 ARRAY_SIZE(shadow_read_only_fields); 64 65 static struct shadow_vmcs_field shadow_read_write_fields[] = { 66 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) }, 67 #include "vmcs_shadow_fields.h" 68 }; 69 static int max_shadow_read_write_fields = 70 ARRAY_SIZE(shadow_read_write_fields); 71 72 static void init_vmcs_shadow_fields(void) 73 { 74 int i, j; 75 76 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); 77 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); 78 79 for (i = j = 0; i < max_shadow_read_only_fields; i++) { 80 struct shadow_vmcs_field entry = shadow_read_only_fields[i]; 81 u16 field = entry.encoding; 82 83 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && 84 (i + 1 == max_shadow_read_only_fields || 85 shadow_read_only_fields[i + 1].encoding != field + 1)) 86 pr_err("Missing field from shadow_read_only_field %x\n", 87 field + 1); 88 89 clear_bit(field, vmx_vmread_bitmap); 90 if (field & 1) 91 #ifdef CONFIG_X86_64 92 continue; 93 #else 94 entry.offset += sizeof(u32); 95 #endif 96 shadow_read_only_fields[j++] = entry; 97 } 98 max_shadow_read_only_fields = j; 99 100 for (i = j = 0; i < max_shadow_read_write_fields; i++) { 101 struct shadow_vmcs_field entry = shadow_read_write_fields[i]; 102 u16 field = entry.encoding; 103 104 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && 105 (i + 1 == max_shadow_read_write_fields || 106 shadow_read_write_fields[i + 1].encoding != field + 1)) 107 pr_err("Missing field from shadow_read_write_field %x\n", 108 field + 1); 109 110 WARN_ONCE(field >= GUEST_ES_AR_BYTES && 111 field <= GUEST_TR_AR_BYTES, 112 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES"); 113 114 /* 115 * PML and the preemption timer can be emulated, but the 116 * processor cannot vmwrite to fields that don't exist 117 * on bare metal. 
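 * Such fields are therefore dropped from the shadow lists below, so
 * L1's VMREAD/VMWRITE of them takes the ordinary exit-and-emulate
 * path against vmcs12 instead of touching the shadow VMCS.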
118 */ 119 switch (field) { 120 case GUEST_PML_INDEX: 121 if (!cpu_has_vmx_pml()) 122 continue; 123 break; 124 case VMX_PREEMPTION_TIMER_VALUE: 125 if (!cpu_has_vmx_preemption_timer()) 126 continue; 127 break; 128 case GUEST_INTR_STATUS: 129 if (!cpu_has_vmx_apicv()) 130 continue; 131 break; 132 default: 133 break; 134 } 135 136 clear_bit(field, vmx_vmwrite_bitmap); 137 clear_bit(field, vmx_vmread_bitmap); 138 if (field & 1) 139 #ifdef CONFIG_X86_64 140 continue; 141 #else 142 entry.offset += sizeof(u32); 143 #endif 144 shadow_read_write_fields[j++] = entry; 145 } 146 max_shadow_read_write_fields = j; 147 } 148 149 /* 150 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), 151 * set the success or error code of an emulated VMX instruction (as specified 152 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated 153 * instruction. 154 */ 155 static int nested_vmx_succeed(struct kvm_vcpu *vcpu) 156 { 157 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) 158 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | 159 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); 160 return kvm_skip_emulated_instruction(vcpu); 161 } 162 163 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu) 164 { 165 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) 166 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | 167 X86_EFLAGS_SF | X86_EFLAGS_OF)) 168 | X86_EFLAGS_CF); 169 return kvm_skip_emulated_instruction(vcpu); 170 } 171 172 static int nested_vmx_failValid(struct kvm_vcpu *vcpu, 173 u32 vm_instruction_error) 174 { 175 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) 176 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | 177 X86_EFLAGS_SF | X86_EFLAGS_OF)) 178 | X86_EFLAGS_ZF); 179 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; 180 /* 181 * We don't need to force a shadow sync because 182 * VM_INSTRUCTION_ERROR is not shadowed 183 */ 184 return kvm_skip_emulated_instruction(vcpu); 185 } 186 187 static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error) 188 { 189 struct vcpu_vmx *vmx = to_vmx(vcpu); 190 191 /* 192 * failValid writes the error number to the current VMCS, which 193 * can't be done if there isn't a current VMCS. 194 */ 195 if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) 196 return nested_vmx_failInvalid(vcpu); 197 198 return nested_vmx_failValid(vcpu, vm_instruction_error); 199 } 200 201 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) 202 { 203 /* TODO: not to reset guest simply here. 
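 * Architecturally a VMX abort shuts down the logical processor;
 * requesting a triple fault below is only an approximation of that
 * behaviour.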
*/ 204 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 205 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator); 206 } 207 208 static inline bool vmx_control_verify(u32 control, u32 low, u32 high) 209 { 210 return fixed_bits_valid(control, low, high); 211 } 212 213 static inline u64 vmx_control_msr(u32 low, u32 high) 214 { 215 return low | ((u64)high << 32); 216 } 217 218 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) 219 { 220 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 221 vmcs_write64(VMCS_LINK_POINTER, -1ull); 222 vmx->nested.need_vmcs12_to_shadow_sync = false; 223 } 224 225 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) 226 { 227 struct vcpu_vmx *vmx = to_vmx(vcpu); 228 229 if (!vmx->nested.hv_evmcs) 230 return; 231 232 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true); 233 vmx->nested.hv_evmcs_vmptr = 0; 234 vmx->nested.hv_evmcs = NULL; 235 } 236 237 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx, 238 struct loaded_vmcs *prev) 239 { 240 struct vmcs_host_state *dest, *src; 241 242 if (unlikely(!vmx->guest_state_loaded)) 243 return; 244 245 src = &prev->host_state; 246 dest = &vmx->loaded_vmcs->host_state; 247 248 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base); 249 dest->ldt_sel = src->ldt_sel; 250 #ifdef CONFIG_X86_64 251 dest->ds_sel = src->ds_sel; 252 dest->es_sel = src->es_sel; 253 #endif 254 } 255 256 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) 257 { 258 struct vcpu_vmx *vmx = to_vmx(vcpu); 259 struct loaded_vmcs *prev; 260 int cpu; 261 262 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs)) 263 return; 264 265 cpu = get_cpu(); 266 prev = vmx->loaded_vmcs; 267 vmx->loaded_vmcs = vmcs; 268 vmx_vcpu_load_vmcs(vcpu, cpu, prev); 269 vmx_sync_vmcs_host_state(vmx, prev); 270 put_cpu(); 271 272 vmx_register_cache_reset(vcpu); 273 } 274 275 /* 276 * Free whatever needs to be freed from vmx->nested when L1 goes down, or 277 * just stops using VMX. 278 */ 279 static void free_nested(struct kvm_vcpu *vcpu) 280 { 281 struct vcpu_vmx *vmx = to_vmx(vcpu); 282 283 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01)) 284 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 285 286 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) 287 return; 288 289 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 290 291 vmx->nested.vmxon = false; 292 vmx->nested.smm.vmxon = false; 293 free_vpid(vmx->nested.vpid02); 294 vmx->nested.posted_intr_nv = -1; 295 vmx->nested.current_vmptr = -1ull; 296 if (enable_shadow_vmcs) { 297 vmx_disable_shadow_vmcs(vmx); 298 vmcs_clear(vmx->vmcs01.shadow_vmcs); 299 free_vmcs(vmx->vmcs01.shadow_vmcs); 300 vmx->vmcs01.shadow_vmcs = NULL; 301 } 302 kfree(vmx->nested.cached_vmcs12); 303 vmx->nested.cached_vmcs12 = NULL; 304 kfree(vmx->nested.cached_shadow_vmcs12); 305 vmx->nested.cached_shadow_vmcs12 = NULL; 306 /* Unpin physical memory we referred to in the vmcs02 */ 307 if (vmx->nested.apic_access_page) { 308 kvm_release_page_clean(vmx->nested.apic_access_page); 309 vmx->nested.apic_access_page = NULL; 310 } 311 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); 312 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); 313 vmx->nested.pi_desc = NULL; 314 315 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 316 317 nested_release_evmcs(vcpu); 318 319 free_loaded_vmcs(&vmx->nested.vmcs02); 320 } 321 322 /* 323 * Ensure that the current vmcs of the logical processor is the 324 * vmcs01 of the vcpu before calling free_nested(). 
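 * vmx_leave_nested() below performs an emulated VM-exit to vmcs01 when
 * the vCPU is in guest mode, so free_nested() never releases a VMCS
 * that is still loaded on the CPU.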
325 */ 326 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu) 327 { 328 vcpu_load(vcpu); 329 vmx_leave_nested(vcpu); 330 vcpu_put(vcpu); 331 } 332 333 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, 334 struct x86_exception *fault) 335 { 336 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 337 struct vcpu_vmx *vmx = to_vmx(vcpu); 338 u32 vm_exit_reason; 339 unsigned long exit_qualification = vcpu->arch.exit_qualification; 340 341 if (vmx->nested.pml_full) { 342 vm_exit_reason = EXIT_REASON_PML_FULL; 343 vmx->nested.pml_full = false; 344 exit_qualification &= INTR_INFO_UNBLOCK_NMI; 345 } else if (fault->error_code & PFERR_RSVD_MASK) 346 vm_exit_reason = EXIT_REASON_EPT_MISCONFIG; 347 else 348 vm_exit_reason = EXIT_REASON_EPT_VIOLATION; 349 350 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification); 351 vmcs12->guest_physical_address = fault->address; 352 } 353 354 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) 355 { 356 WARN_ON(mmu_is_nested(vcpu)); 357 358 vcpu->arch.mmu = &vcpu->arch.guest_mmu; 359 kvm_init_shadow_ept_mmu(vcpu, 360 to_vmx(vcpu)->nested.msrs.ept_caps & 361 VMX_EPT_EXECUTE_ONLY_BIT, 362 nested_ept_ad_enabled(vcpu), 363 nested_ept_get_eptp(vcpu)); 364 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp; 365 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault; 366 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read; 367 368 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; 369 } 370 371 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) 372 { 373 vcpu->arch.mmu = &vcpu->arch.root_mmu; 374 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; 375 } 376 377 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, 378 u16 error_code) 379 { 380 bool inequality, bit; 381 382 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; 383 inequality = 384 (error_code & vmcs12->page_fault_error_code_mask) != 385 vmcs12->page_fault_error_code_match; 386 return inequality ^ bit; 387 } 388 389 390 /* 391 * KVM wants to inject page-faults which it got to the guest. This function 392 * checks whether in a nested guest, we need to inject them to L1 or L2. 393 */ 394 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual) 395 { 396 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 397 unsigned int nr = vcpu->arch.exception.nr; 398 bool has_payload = vcpu->arch.exception.has_payload; 399 unsigned long payload = vcpu->arch.exception.payload; 400 401 if (nr == PF_VECTOR) { 402 if (vcpu->arch.exception.nested_apf) { 403 *exit_qual = vcpu->arch.apf.nested_apf_token; 404 return 1; 405 } 406 if (nested_vmx_is_page_fault_vmexit(vmcs12, 407 vcpu->arch.exception.error_code)) { 408 *exit_qual = has_payload ? 
payload : vcpu->arch.cr2; 409 return 1; 410 } 411 } else if (vmcs12->exception_bitmap & (1u << nr)) { 412 if (nr == DB_VECTOR) { 413 if (!has_payload) { 414 payload = vcpu->arch.dr6; 415 payload &= ~DR6_BT; 416 payload ^= DR6_ACTIVE_LOW; 417 } 418 *exit_qual = payload; 419 } else 420 *exit_qual = 0; 421 return 1; 422 } 423 424 return 0; 425 } 426 427 428 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, 429 struct x86_exception *fault) 430 { 431 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 432 433 WARN_ON(!is_guest_mode(vcpu)); 434 435 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) && 436 !to_vmx(vcpu)->nested.nested_run_pending) { 437 vmcs12->vm_exit_intr_error_code = fault->error_code; 438 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 439 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | 440 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK, 441 fault->address); 442 } else { 443 kvm_inject_page_fault(vcpu, fault); 444 } 445 } 446 447 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu, 448 struct vmcs12 *vmcs12) 449 { 450 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 451 return 0; 452 453 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) || 454 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b))) 455 return -EINVAL; 456 457 return 0; 458 } 459 460 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, 461 struct vmcs12 *vmcs12) 462 { 463 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 464 return 0; 465 466 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap))) 467 return -EINVAL; 468 469 return 0; 470 } 471 472 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, 473 struct vmcs12 *vmcs12) 474 { 475 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) 476 return 0; 477 478 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))) 479 return -EINVAL; 480 481 return 0; 482 } 483 484 /* 485 * Check if MSR is intercepted for L01 MSR bitmap. 486 */ 487 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr) 488 { 489 unsigned long *msr_bitmap; 490 int f = sizeof(unsigned long); 491 492 if (!cpu_has_vmx_msr_bitmap()) 493 return true; 494 495 msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; 496 497 if (msr <= 0x1fff) { 498 return !!test_bit(msr, msr_bitmap + 0x800 / f); 499 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 500 msr &= 0x1fff; 501 return !!test_bit(msr, msr_bitmap + 0xc00 / f); 502 } 503 504 return true; 505 } 506 507 /* 508 * If a msr is allowed by L0, we should check whether it is allowed by L1. 509 * The corresponding bit will be cleared unless both of L0 and L1 allow it. 510 */ 511 static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, 512 unsigned long *msr_bitmap_nested, 513 u32 msr, int type) 514 { 515 int f = sizeof(unsigned long); 516 517 /* 518 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals 519 * have the write-low and read-high bitmap offsets the wrong way round. 520 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
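 * Each 4K bitmap page holds four 1K regions, one bit per MSR:
 * read-low at 0x000, read-high at 0x400, write-low at 0x800 and
 * write-high at 0xc00. E.g. MSR_EFER (0xc0000080) is bit 0x80 of the
 * read-high and write-high regions.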
521 */ 522 if (msr <= 0x1fff) { 523 if (type & MSR_TYPE_R && 524 !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) 525 /* read-low */ 526 __clear_bit(msr, msr_bitmap_nested + 0x000 / f); 527 528 if (type & MSR_TYPE_W && 529 !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) 530 /* write-low */ 531 __clear_bit(msr, msr_bitmap_nested + 0x800 / f); 532 533 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 534 msr &= 0x1fff; 535 if (type & MSR_TYPE_R && 536 !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) 537 /* read-high */ 538 __clear_bit(msr, msr_bitmap_nested + 0x400 / f); 539 540 if (type & MSR_TYPE_W && 541 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) 542 /* write-high */ 543 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); 544 545 } 546 } 547 548 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) 549 { 550 int msr; 551 552 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 553 unsigned word = msr / BITS_PER_LONG; 554 555 msr_bitmap[word] = ~0; 556 msr_bitmap[word + (0x800 / sizeof(long))] = ~0; 557 } 558 } 559 560 /* 561 * Merge L0's and L1's MSR bitmap, return false to indicate that 562 * we do not use the hardware. 563 */ 564 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, 565 struct vmcs12 *vmcs12) 566 { 567 int msr; 568 unsigned long *msr_bitmap_l1; 569 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; 570 struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map; 571 572 /* Nothing to do if the MSR bitmap is not in use. */ 573 if (!cpu_has_vmx_msr_bitmap() || 574 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 575 return false; 576 577 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map)) 578 return false; 579 580 msr_bitmap_l1 = (unsigned long *)map->hva; 581 582 /* 583 * To keep the control flow simple, pay eight 8-byte writes (sixteen 584 * 4-byte writes on 32-bit systems) up front to enable intercepts for 585 * the x2APIC MSR range and selectively disable them below. 586 */ 587 enable_x2apic_msr_intercepts(msr_bitmap_l0); 588 589 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { 590 if (nested_cpu_has_apic_reg_virt(vmcs12)) { 591 /* 592 * L0 need not intercept reads for MSRs between 0x800 593 * and 0x8ff, it just lets the processor take the value 594 * from the virtual-APIC page; take those 256 bits 595 * directly from the L1 bitmap. 596 */ 597 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 598 unsigned word = msr / BITS_PER_LONG; 599 600 msr_bitmap_l0[word] = msr_bitmap_l1[word]; 601 } 602 } 603 604 nested_vmx_disable_intercept_for_msr( 605 msr_bitmap_l1, msr_bitmap_l0, 606 X2APIC_MSR(APIC_TASKPRI), 607 MSR_TYPE_R | MSR_TYPE_W); 608 609 if (nested_cpu_has_vid(vmcs12)) { 610 nested_vmx_disable_intercept_for_msr( 611 msr_bitmap_l1, msr_bitmap_l0, 612 X2APIC_MSR(APIC_EOI), 613 MSR_TYPE_W); 614 nested_vmx_disable_intercept_for_msr( 615 msr_bitmap_l1, msr_bitmap_l0, 616 X2APIC_MSR(APIC_SELF_IPI), 617 MSR_TYPE_W); 618 } 619 } 620 621 /* KVM unconditionally exposes the FS/GS base MSRs to L1. */ 622 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 623 MSR_FS_BASE, MSR_TYPE_RW); 624 625 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 626 MSR_GS_BASE, MSR_TYPE_RW); 627 628 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 629 MSR_KERNEL_GS_BASE, MSR_TYPE_RW); 630 631 /* 632 * Checking the L0->L1 bitmap is trying to verify two things: 633 * 634 * 1. L0 gave a permission to L1 to actually passthrough the MSR. 
This 635 * ensures that we do not accidentally generate an L02 MSR bitmap 636 * from the L12 MSR bitmap that is too permissive. 637 * 2. That L1 or L2s have actually used the MSR. This avoids 638 * unnecessarily merging of the bitmap if the MSR is unused. This 639 * works properly because we only update the L01 MSR bitmap lazily. 640 * So even if L0 should pass L1 these MSRs, the L01 bitmap is only 641 * updated to reflect this when L1 (or its L2s) actually write to 642 * the MSR. 643 */ 644 if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL)) 645 nested_vmx_disable_intercept_for_msr( 646 msr_bitmap_l1, msr_bitmap_l0, 647 MSR_IA32_SPEC_CTRL, 648 MSR_TYPE_R | MSR_TYPE_W); 649 650 if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD)) 651 nested_vmx_disable_intercept_for_msr( 652 msr_bitmap_l1, msr_bitmap_l0, 653 MSR_IA32_PRED_CMD, 654 MSR_TYPE_W); 655 656 kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false); 657 658 return true; 659 } 660 661 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu, 662 struct vmcs12 *vmcs12) 663 { 664 struct kvm_host_map map; 665 struct vmcs12 *shadow; 666 667 if (!nested_cpu_has_shadow_vmcs(vmcs12) || 668 vmcs12->vmcs_link_pointer == -1ull) 669 return; 670 671 shadow = get_shadow_vmcs12(vcpu); 672 673 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)) 674 return; 675 676 memcpy(shadow, map.hva, VMCS12_SIZE); 677 kvm_vcpu_unmap(vcpu, &map, false); 678 } 679 680 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu, 681 struct vmcs12 *vmcs12) 682 { 683 struct vcpu_vmx *vmx = to_vmx(vcpu); 684 685 if (!nested_cpu_has_shadow_vmcs(vmcs12) || 686 vmcs12->vmcs_link_pointer == -1ull) 687 return; 688 689 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer, 690 get_shadow_vmcs12(vcpu), VMCS12_SIZE); 691 } 692 693 /* 694 * In nested virtualization, check if L1 has set 695 * VM_EXIT_ACK_INTR_ON_EXIT 696 */ 697 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) 698 { 699 return get_vmcs12(vcpu)->vm_exit_controls & 700 VM_EXIT_ACK_INTR_ON_EXIT; 701 } 702 703 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, 704 struct vmcs12 *vmcs12) 705 { 706 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && 707 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr))) 708 return -EINVAL; 709 else 710 return 0; 711 } 712 713 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, 714 struct vmcs12 *vmcs12) 715 { 716 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && 717 !nested_cpu_has_apic_reg_virt(vmcs12) && 718 !nested_cpu_has_vid(vmcs12) && 719 !nested_cpu_has_posted_intr(vmcs12)) 720 return 0; 721 722 /* 723 * If virtualize x2apic mode is enabled, 724 * virtualize apic access must be disabled. 725 */ 726 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) && 727 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))) 728 return -EINVAL; 729 730 /* 731 * If virtual interrupt delivery is enabled, 732 * we must exit on external interrupts. 733 */ 734 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu))) 735 return -EINVAL; 736 737 /* 738 * bits 15:8 should be zero in posted_intr_nv, 739 * the descriptor address has been already checked 740 * in nested_get_vmcs12_pages. 741 * 742 * bits 5:0 of posted_intr_desc_addr should be zero. 
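 * i.e. the posted-interrupt descriptor must be 64-byte aligned, which
 * is what the kvm_vcpu_is_legal_aligned_gpa(..., 64) check below
 * enforces.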
743 */ 744 if (nested_cpu_has_posted_intr(vmcs12) && 745 (CC(!nested_cpu_has_vid(vmcs12)) || 746 CC(!nested_exit_intr_ack_set(vcpu)) || 747 CC((vmcs12->posted_intr_nv & 0xff00)) || 748 CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64)))) 749 return -EINVAL; 750 751 /* tpr shadow is needed by all apicv features. */ 752 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))) 753 return -EINVAL; 754 755 return 0; 756 } 757 758 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, 759 u32 count, u64 addr) 760 { 761 if (count == 0) 762 return 0; 763 764 if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) || 765 !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1))) 766 return -EINVAL; 767 768 return 0; 769 } 770 771 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu, 772 struct vmcs12 *vmcs12) 773 { 774 if (CC(nested_vmx_check_msr_switch(vcpu, 775 vmcs12->vm_exit_msr_load_count, 776 vmcs12->vm_exit_msr_load_addr)) || 777 CC(nested_vmx_check_msr_switch(vcpu, 778 vmcs12->vm_exit_msr_store_count, 779 vmcs12->vm_exit_msr_store_addr))) 780 return -EINVAL; 781 782 return 0; 783 } 784 785 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu, 786 struct vmcs12 *vmcs12) 787 { 788 if (CC(nested_vmx_check_msr_switch(vcpu, 789 vmcs12->vm_entry_msr_load_count, 790 vmcs12->vm_entry_msr_load_addr))) 791 return -EINVAL; 792 793 return 0; 794 } 795 796 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, 797 struct vmcs12 *vmcs12) 798 { 799 if (!nested_cpu_has_pml(vmcs12)) 800 return 0; 801 802 if (CC(!nested_cpu_has_ept(vmcs12)) || 803 CC(!page_address_valid(vcpu, vmcs12->pml_address))) 804 return -EINVAL; 805 806 return 0; 807 } 808 809 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu, 810 struct vmcs12 *vmcs12) 811 { 812 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) && 813 !nested_cpu_has_ept(vmcs12))) 814 return -EINVAL; 815 return 0; 816 } 817 818 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu, 819 struct vmcs12 *vmcs12) 820 { 821 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) && 822 !nested_cpu_has_ept(vmcs12))) 823 return -EINVAL; 824 return 0; 825 } 826 827 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu, 828 struct vmcs12 *vmcs12) 829 { 830 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 831 return 0; 832 833 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) || 834 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap))) 835 return -EINVAL; 836 837 return 0; 838 } 839 840 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, 841 struct vmx_msr_entry *e) 842 { 843 /* x2APIC MSR accesses are not allowed */ 844 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)) 845 return -EINVAL; 846 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */ 847 CC(e->index == MSR_IA32_UCODE_REV)) 848 return -EINVAL; 849 if (CC(e->reserved != 0)) 850 return -EINVAL; 851 return 0; 852 } 853 854 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, 855 struct vmx_msr_entry *e) 856 { 857 if (CC(e->index == MSR_FS_BASE) || 858 CC(e->index == MSR_GS_BASE) || 859 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */ 860 nested_vmx_msr_check_common(vcpu, e)) 861 return -EINVAL; 862 return 0; 863 } 864 865 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, 866 struct vmx_msr_entry *e) 867 { 868 if (CC(e->index == MSR_IA32_SMBASE) 
|| /* SMM is not supported */ 869 nested_vmx_msr_check_common(vcpu, e)) 870 return -EINVAL; 871 return 0; 872 } 873 874 static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu) 875 { 876 struct vcpu_vmx *vmx = to_vmx(vcpu); 877 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, 878 vmx->nested.msrs.misc_high); 879 880 return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER; 881 } 882 883 /* 884 * Load guest's/host's msr at nested entry/exit. 885 * return 0 for success, entry index for failure. 886 * 887 * One of the failure modes for MSR load/store is when a list exceeds the 888 * virtual hardware's capacity. To maintain compatibility with hardware inasmuch 889 * as possible, process all valid entries before failing rather than precheck 890 * for a capacity violation. 891 */ 892 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) 893 { 894 u32 i; 895 struct vmx_msr_entry e; 896 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu); 897 898 for (i = 0; i < count; i++) { 899 if (unlikely(i >= max_msr_list_size)) 900 goto fail; 901 902 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), 903 &e, sizeof(e))) { 904 pr_debug_ratelimited( 905 "%s cannot read MSR entry (%u, 0x%08llx)\n", 906 __func__, i, gpa + i * sizeof(e)); 907 goto fail; 908 } 909 if (nested_vmx_load_msr_check(vcpu, &e)) { 910 pr_debug_ratelimited( 911 "%s check failed (%u, 0x%x, 0x%x)\n", 912 __func__, i, e.index, e.reserved); 913 goto fail; 914 } 915 if (kvm_set_msr(vcpu, e.index, e.value)) { 916 pr_debug_ratelimited( 917 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", 918 __func__, i, e.index, e.value); 919 goto fail; 920 } 921 } 922 return 0; 923 fail: 924 /* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */ 925 return i + 1; 926 } 927 928 static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu, 929 u32 msr_index, 930 u64 *data) 931 { 932 struct vcpu_vmx *vmx = to_vmx(vcpu); 933 934 /* 935 * If the L0 hypervisor stored a more accurate value for the TSC that 936 * does not include the time taken for emulation of the L2->L1 937 * VM-exit in L0, use the more accurate value. 
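 * Such a value is captured via the vmcs02 VM-exit MSR-store area (see
 * prepare_vmx_msr_autostore_list()) and is converted back to L1's view
 * of the TSC with kvm_read_l1_tsc() below.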
938 */ 939 if (msr_index == MSR_IA32_TSC) { 940 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest, 941 MSR_IA32_TSC); 942 943 if (i >= 0) { 944 u64 val = vmx->msr_autostore.guest.val[i].value; 945 946 *data = kvm_read_l1_tsc(vcpu, val); 947 return true; 948 } 949 } 950 951 if (kvm_get_msr(vcpu, msr_index, data)) { 952 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__, 953 msr_index); 954 return false; 955 } 956 return true; 957 } 958 959 static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i, 960 struct vmx_msr_entry *e) 961 { 962 if (kvm_vcpu_read_guest(vcpu, 963 gpa + i * sizeof(*e), 964 e, 2 * sizeof(u32))) { 965 pr_debug_ratelimited( 966 "%s cannot read MSR entry (%u, 0x%08llx)\n", 967 __func__, i, gpa + i * sizeof(*e)); 968 return false; 969 } 970 if (nested_vmx_store_msr_check(vcpu, e)) { 971 pr_debug_ratelimited( 972 "%s check failed (%u, 0x%x, 0x%x)\n", 973 __func__, i, e->index, e->reserved); 974 return false; 975 } 976 return true; 977 } 978 979 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) 980 { 981 u64 data; 982 u32 i; 983 struct vmx_msr_entry e; 984 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu); 985 986 for (i = 0; i < count; i++) { 987 if (unlikely(i >= max_msr_list_size)) 988 return -EINVAL; 989 990 if (!read_and_check_msr_entry(vcpu, gpa, i, &e)) 991 return -EINVAL; 992 993 if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data)) 994 return -EINVAL; 995 996 if (kvm_vcpu_write_guest(vcpu, 997 gpa + i * sizeof(e) + 998 offsetof(struct vmx_msr_entry, value), 999 &data, sizeof(data))) { 1000 pr_debug_ratelimited( 1001 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", 1002 __func__, i, e.index, data); 1003 return -EINVAL; 1004 } 1005 } 1006 return 0; 1007 } 1008 1009 static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index) 1010 { 1011 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1012 u32 count = vmcs12->vm_exit_msr_store_count; 1013 u64 gpa = vmcs12->vm_exit_msr_store_addr; 1014 struct vmx_msr_entry e; 1015 u32 i; 1016 1017 for (i = 0; i < count; i++) { 1018 if (!read_and_check_msr_entry(vcpu, gpa, i, &e)) 1019 return false; 1020 1021 if (e.index == msr_index) 1022 return true; 1023 } 1024 return false; 1025 } 1026 1027 static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu, 1028 u32 msr_index) 1029 { 1030 struct vcpu_vmx *vmx = to_vmx(vcpu); 1031 struct vmx_msrs *autostore = &vmx->msr_autostore.guest; 1032 bool in_vmcs12_store_list; 1033 int msr_autostore_slot; 1034 bool in_autostore_list; 1035 int last; 1036 1037 msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index); 1038 in_autostore_list = msr_autostore_slot >= 0; 1039 in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index); 1040 1041 if (in_vmcs12_store_list && !in_autostore_list) { 1042 if (autostore->nr == MAX_NR_LOADSTORE_MSRS) { 1043 /* 1044 * Emulated VMEntry does not fail here. Instead a less 1045 * accurate value will be returned by 1046 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr() 1047 * instead of reading the value from the vmcs02 VMExit 1048 * MSR-store area. 1049 */ 1050 pr_warn_ratelimited( 1051 "Not enough msr entries in msr_autostore. 
Can't add msr %x\n", 1052 msr_index); 1053 return; 1054 } 1055 last = autostore->nr++; 1056 autostore->val[last].index = msr_index; 1057 } else if (!in_vmcs12_store_list && in_autostore_list) { 1058 last = --autostore->nr; 1059 autostore->val[msr_autostore_slot] = autostore->val[last]; 1060 } 1061 } 1062 1063 /* 1064 * Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit. 1065 * tl;dr: the MMU needs a sync if L0 is using shadow paging and L1 didn't 1066 * enable VPID for L2 (implying it expects a TLB flush on VMX transitions). 1067 * Here's why. 1068 * 1069 * If EPT is enabled by L0 a sync is never needed: 1070 * - if it is disabled by L1, then L0 is not shadowing L1 or L2 PTEs, there 1071 * cannot be unsync'd SPTEs for either L1 or L2. 1072 * 1073 * - if it is also enabled by L1, then L0 doesn't need to sync on VM-Enter 1074 * VM-Enter as VM-Enter isn't required to invalidate guest-physical mappings 1075 * (irrespective of VPID), i.e. L1 can't rely on the (virtual) CPU to flush 1076 * stale guest-physical mappings for L2 from the TLB. And as above, L0 isn't 1077 * shadowing L1 PTEs so there are no unsync'd SPTEs to sync on VM-Exit. 1078 * 1079 * If EPT is disabled by L0: 1080 * - if VPID is enabled by L1 (for L2), the situation is similar to when L1 1081 * enables EPT: L0 doesn't need to sync as VM-Enter and VM-Exit aren't 1082 * required to invalidate linear mappings (EPT is disabled so there are 1083 * no combined or guest-physical mappings), i.e. L1 can't rely on the 1084 * (virtual) CPU to flush stale linear mappings for either L2 or itself (L1). 1085 * 1086 * - however if VPID is disabled by L1, then a sync is needed as L1 expects all 1087 * linear mappings (EPT is disabled so there are no combined or guest-physical 1088 * mappings) to be invalidated on both VM-Enter and VM-Exit. 1089 * 1090 * Note, this logic is subtly different than nested_has_guest_tlb_tag(), which 1091 * additionally checks that L2 has been assigned a VPID (when EPT is disabled). 1092 * Whether or not L2 has been assigned a VPID by L0 is irrelevant with respect 1093 * to L1's expectations, e.g. L0 needs to invalidate hardware TLB entries if L2 1094 * doesn't have a unique VPID to prevent reusing L1's entries (assuming L1 has 1095 * been assigned a VPID), but L0 doesn't need to do a MMU sync because L1 1096 * doesn't expect stale (virtual) TLB entries to be flushed, i.e. L1 doesn't 1097 * know that L0 will flush the TLB and so L1 will do INVVPID as needed to flush 1098 * stale TLB entries, at which point L0 will sync L2's MMU. 1099 */ 1100 static bool nested_vmx_transition_mmu_sync(struct kvm_vcpu *vcpu) 1101 { 1102 return !enable_ept && !nested_cpu_has_vpid(get_vmcs12(vcpu)); 1103 } 1104 1105 /* 1106 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are 1107 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected 1108 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to 1109 * @entry_failure_code. 1110 */ 1111 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, 1112 enum vm_entry_failure_code *entry_failure_code) 1113 { 1114 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) { 1115 *entry_failure_code = ENTRY_FAIL_DEFAULT; 1116 return -EINVAL; 1117 } 1118 1119 /* 1120 * If PAE paging and EPT are both on, CR3 is not used by the CPU and 1121 * must not be dereferenced. 
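 * The PDPTEs are thus only loaded from guest memory when EPT is
 * disabled for L2; a bad PDPTE is reported as ENTRY_FAIL_PDPTE rather
 * than as an illegal CR3.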
1122 */ 1123 if (!nested_ept && is_pae_paging(vcpu) && 1124 (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) { 1125 if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) { 1126 *entry_failure_code = ENTRY_FAIL_PDPTE; 1127 return -EINVAL; 1128 } 1129 } 1130 1131 /* 1132 * Unconditionally skip the TLB flush on fast CR3 switch, all TLB 1133 * flushes are handled by nested_vmx_transition_tlb_flush(). See 1134 * nested_vmx_transition_mmu_sync for details on skipping the MMU sync. 1135 */ 1136 if (!nested_ept) 1137 kvm_mmu_new_pgd(vcpu, cr3, true, 1138 !nested_vmx_transition_mmu_sync(vcpu)); 1139 1140 vcpu->arch.cr3 = cr3; 1141 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 1142 1143 kvm_init_mmu(vcpu, false); 1144 1145 return 0; 1146 } 1147 1148 /* 1149 * Returns if KVM is able to config CPU to tag TLB entries 1150 * populated by L2 differently than TLB entries populated 1151 * by L1. 1152 * 1153 * If L0 uses EPT, L1 and L2 run with different EPTP because 1154 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries 1155 * are tagged with different EPTP. 1156 * 1157 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged 1158 * with different VPID (L1 entries are tagged with vmx->vpid 1159 * while L2 entries are tagged with vmx->nested.vpid02). 1160 */ 1161 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu) 1162 { 1163 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1164 1165 return enable_ept || 1166 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); 1167 } 1168 1169 static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu, 1170 struct vmcs12 *vmcs12, 1171 bool is_vmenter) 1172 { 1173 struct vcpu_vmx *vmx = to_vmx(vcpu); 1174 1175 /* 1176 * If VPID is disabled, linear and combined mappings are flushed on 1177 * VM-Enter/VM-Exit, and guest-physical mappings are valid only for 1178 * their associated EPTP. 1179 */ 1180 if (!enable_vpid) 1181 return; 1182 1183 /* 1184 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings 1185 * for *all* contexts to be flushed on VM-Enter/VM-Exit. 1186 * 1187 * If VPID is enabled and used by vmc12, but L2 does not have a unique 1188 * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate 1189 * a VPID for L2, flush the current context as the effective ASID is 1190 * common to both L1 and L2. 1191 * 1192 * Defer the flush so that it runs after vmcs02.EPTP has been set by 1193 * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid 1194 * redundant flushes further down the nested pipeline. 1195 * 1196 * If a TLB flush isn't required due to any of the above, and vpid12 is 1197 * changing then the new "virtual" VPID (vpid12) will reuse the same 1198 * "real" VPID (vpid02), and so needs to be sync'd. There is no direct 1199 * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for 1200 * all nested vCPUs. 
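 * Hence the vpid_sync_context(vpid02) below whenever L1 switches to a
 * new vpid12 on VM-Enter, so translations tagged with the old vpid12
 * cannot leak into the new context.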
1201 */ 1202 if (!nested_cpu_has_vpid(vmcs12)) { 1203 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1204 } else if (!nested_has_guest_tlb_tag(vcpu)) { 1205 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 1206 } else if (is_vmenter && 1207 vmcs12->virtual_processor_id != vmx->nested.last_vpid) { 1208 vmx->nested.last_vpid = vmcs12->virtual_processor_id; 1209 vpid_sync_context(nested_get_vpid02(vcpu)); 1210 } 1211 } 1212 1213 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) 1214 { 1215 superset &= mask; 1216 subset &= mask; 1217 1218 return (superset | subset) == superset; 1219 } 1220 1221 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) 1222 { 1223 const u64 feature_and_reserved = 1224 /* feature (except bit 48; see below) */ 1225 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | 1226 /* reserved */ 1227 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); 1228 u64 vmx_basic = vmx->nested.msrs.basic; 1229 1230 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) 1231 return -EINVAL; 1232 1233 /* 1234 * KVM does not emulate a version of VMX that constrains physical 1235 * addresses of VMX structures (e.g. VMCS) to 32-bits. 1236 */ 1237 if (data & BIT_ULL(48)) 1238 return -EINVAL; 1239 1240 if (vmx_basic_vmcs_revision_id(vmx_basic) != 1241 vmx_basic_vmcs_revision_id(data)) 1242 return -EINVAL; 1243 1244 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data)) 1245 return -EINVAL; 1246 1247 vmx->nested.msrs.basic = data; 1248 return 0; 1249 } 1250 1251 static int 1252 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) 1253 { 1254 u64 supported; 1255 u32 *lowp, *highp; 1256 1257 switch (msr_index) { 1258 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1259 lowp = &vmx->nested.msrs.pinbased_ctls_low; 1260 highp = &vmx->nested.msrs.pinbased_ctls_high; 1261 break; 1262 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1263 lowp = &vmx->nested.msrs.procbased_ctls_low; 1264 highp = &vmx->nested.msrs.procbased_ctls_high; 1265 break; 1266 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1267 lowp = &vmx->nested.msrs.exit_ctls_low; 1268 highp = &vmx->nested.msrs.exit_ctls_high; 1269 break; 1270 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1271 lowp = &vmx->nested.msrs.entry_ctls_low; 1272 highp = &vmx->nested.msrs.entry_ctls_high; 1273 break; 1274 case MSR_IA32_VMX_PROCBASED_CTLS2: 1275 lowp = &vmx->nested.msrs.secondary_ctls_low; 1276 highp = &vmx->nested.msrs.secondary_ctls_high; 1277 break; 1278 default: 1279 BUG(); 1280 } 1281 1282 supported = vmx_control_msr(*lowp, *highp); 1283 1284 /* Check must-be-1 bits are still 1. */ 1285 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) 1286 return -EINVAL; 1287 1288 /* Check must-be-0 bits are still 0. 
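 * The low 32 bits of a VMX control MSR are the allowed-0 settings and
 * the high 32 bits the allowed-1 settings, so userspace can tighten
 * what KVM reports but never relax it.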
*/ 1289 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32))) 1290 return -EINVAL; 1291 1292 *lowp = data; 1293 *highp = data >> 32; 1294 return 0; 1295 } 1296 1297 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) 1298 { 1299 const u64 feature_and_reserved_bits = 1300 /* feature */ 1301 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) | 1302 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | 1303 /* reserved */ 1304 GENMASK_ULL(13, 9) | BIT_ULL(31); 1305 u64 vmx_misc; 1306 1307 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, 1308 vmx->nested.msrs.misc_high); 1309 1310 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) 1311 return -EINVAL; 1312 1313 if ((vmx->nested.msrs.pinbased_ctls_high & 1314 PIN_BASED_VMX_PREEMPTION_TIMER) && 1315 vmx_misc_preemption_timer_rate(data) != 1316 vmx_misc_preemption_timer_rate(vmx_misc)) 1317 return -EINVAL; 1318 1319 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc)) 1320 return -EINVAL; 1321 1322 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc)) 1323 return -EINVAL; 1324 1325 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc)) 1326 return -EINVAL; 1327 1328 vmx->nested.msrs.misc_low = data; 1329 vmx->nested.msrs.misc_high = data >> 32; 1330 1331 return 0; 1332 } 1333 1334 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) 1335 { 1336 u64 vmx_ept_vpid_cap; 1337 1338 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps, 1339 vmx->nested.msrs.vpid_caps); 1340 1341 /* Every bit is either reserved or a feature bit. */ 1342 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) 1343 return -EINVAL; 1344 1345 vmx->nested.msrs.ept_caps = data; 1346 vmx->nested.msrs.vpid_caps = data >> 32; 1347 return 0; 1348 } 1349 1350 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) 1351 { 1352 u64 *msr; 1353 1354 switch (msr_index) { 1355 case MSR_IA32_VMX_CR0_FIXED0: 1356 msr = &vmx->nested.msrs.cr0_fixed0; 1357 break; 1358 case MSR_IA32_VMX_CR4_FIXED0: 1359 msr = &vmx->nested.msrs.cr4_fixed0; 1360 break; 1361 default: 1362 BUG(); 1363 } 1364 1365 /* 1366 * 1 bits (which indicates bits which "must-be-1" during VMX operation) 1367 * must be 1 in the restored value. 1368 */ 1369 if (!is_bitwise_subset(data, *msr, -1ULL)) 1370 return -EINVAL; 1371 1372 *msr = data; 1373 return 0; 1374 } 1375 1376 /* 1377 * Called when userspace is restoring VMX MSRs. 1378 * 1379 * Returns 0 on success, non-0 otherwise. 1380 */ 1381 int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) 1382 { 1383 struct vcpu_vmx *vmx = to_vmx(vcpu); 1384 1385 /* 1386 * Don't allow changes to the VMX capability MSRs while the vCPU 1387 * is in VMX operation. 1388 */ 1389 if (vmx->nested.vmxon) 1390 return -EBUSY; 1391 1392 switch (msr_index) { 1393 case MSR_IA32_VMX_BASIC: 1394 return vmx_restore_vmx_basic(vmx, data); 1395 case MSR_IA32_VMX_PINBASED_CTLS: 1396 case MSR_IA32_VMX_PROCBASED_CTLS: 1397 case MSR_IA32_VMX_EXIT_CTLS: 1398 case MSR_IA32_VMX_ENTRY_CTLS: 1399 /* 1400 * The "non-true" VMX capability MSRs are generated from the 1401 * "true" MSRs, so we do not support restoring them directly. 1402 * 1403 * If userspace wants to emulate VMX_BASIC[55]=0, userspace 1404 * should restore the "true" MSRs with the must-be-1 bits 1405 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND 1406 * DEFAULT SETTINGS". 
1407 */ 1408 return -EINVAL; 1409 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1410 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1411 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1412 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1413 case MSR_IA32_VMX_PROCBASED_CTLS2: 1414 return vmx_restore_control_msr(vmx, msr_index, data); 1415 case MSR_IA32_VMX_MISC: 1416 return vmx_restore_vmx_misc(vmx, data); 1417 case MSR_IA32_VMX_CR0_FIXED0: 1418 case MSR_IA32_VMX_CR4_FIXED0: 1419 return vmx_restore_fixed0_msr(vmx, msr_index, data); 1420 case MSR_IA32_VMX_CR0_FIXED1: 1421 case MSR_IA32_VMX_CR4_FIXED1: 1422 /* 1423 * These MSRs are generated based on the vCPU's CPUID, so we 1424 * do not support restoring them directly. 1425 */ 1426 return -EINVAL; 1427 case MSR_IA32_VMX_EPT_VPID_CAP: 1428 return vmx_restore_vmx_ept_vpid_cap(vmx, data); 1429 case MSR_IA32_VMX_VMCS_ENUM: 1430 vmx->nested.msrs.vmcs_enum = data; 1431 return 0; 1432 case MSR_IA32_VMX_VMFUNC: 1433 if (data & ~vmx->nested.msrs.vmfunc_controls) 1434 return -EINVAL; 1435 vmx->nested.msrs.vmfunc_controls = data; 1436 return 0; 1437 default: 1438 /* 1439 * The rest of the VMX capability MSRs do not support restore. 1440 */ 1441 return -EINVAL; 1442 } 1443 } 1444 1445 /* Returns 0 on success, non-0 otherwise. */ 1446 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata) 1447 { 1448 switch (msr_index) { 1449 case MSR_IA32_VMX_BASIC: 1450 *pdata = msrs->basic; 1451 break; 1452 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1453 case MSR_IA32_VMX_PINBASED_CTLS: 1454 *pdata = vmx_control_msr( 1455 msrs->pinbased_ctls_low, 1456 msrs->pinbased_ctls_high); 1457 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS) 1458 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 1459 break; 1460 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1461 case MSR_IA32_VMX_PROCBASED_CTLS: 1462 *pdata = vmx_control_msr( 1463 msrs->procbased_ctls_low, 1464 msrs->procbased_ctls_high); 1465 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS) 1466 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 1467 break; 1468 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1469 case MSR_IA32_VMX_EXIT_CTLS: 1470 *pdata = vmx_control_msr( 1471 msrs->exit_ctls_low, 1472 msrs->exit_ctls_high); 1473 if (msr_index == MSR_IA32_VMX_EXIT_CTLS) 1474 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 1475 break; 1476 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1477 case MSR_IA32_VMX_ENTRY_CTLS: 1478 *pdata = vmx_control_msr( 1479 msrs->entry_ctls_low, 1480 msrs->entry_ctls_high); 1481 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS) 1482 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 1483 break; 1484 case MSR_IA32_VMX_MISC: 1485 *pdata = vmx_control_msr( 1486 msrs->misc_low, 1487 msrs->misc_high); 1488 break; 1489 case MSR_IA32_VMX_CR0_FIXED0: 1490 *pdata = msrs->cr0_fixed0; 1491 break; 1492 case MSR_IA32_VMX_CR0_FIXED1: 1493 *pdata = msrs->cr0_fixed1; 1494 break; 1495 case MSR_IA32_VMX_CR4_FIXED0: 1496 *pdata = msrs->cr4_fixed0; 1497 break; 1498 case MSR_IA32_VMX_CR4_FIXED1: 1499 *pdata = msrs->cr4_fixed1; 1500 break; 1501 case MSR_IA32_VMX_VMCS_ENUM: 1502 *pdata = msrs->vmcs_enum; 1503 break; 1504 case MSR_IA32_VMX_PROCBASED_CTLS2: 1505 *pdata = vmx_control_msr( 1506 msrs->secondary_ctls_low, 1507 msrs->secondary_ctls_high); 1508 break; 1509 case MSR_IA32_VMX_EPT_VPID_CAP: 1510 *pdata = msrs->ept_caps | 1511 ((u64)msrs->vpid_caps << 32); 1512 break; 1513 case MSR_IA32_VMX_VMFUNC: 1514 *pdata = msrs->vmfunc_controls; 1515 break; 1516 default: 1517 return 1; 1518 } 1519 1520 return 0; 1521 } 1522 1523 /* 1524 * Copy the writable VMCS shadow fields back to the VMCS12, in case they 
have 1525 * been modified by the L1 guest. Note, "writable" in this context means 1526 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of 1527 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only" 1528 * VM-exit information fields (which are actually writable if the vCPU is 1529 * configured to support "VMWRITE to any supported field in the VMCS"). 1530 */ 1531 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) 1532 { 1533 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 1534 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); 1535 struct shadow_vmcs_field field; 1536 unsigned long val; 1537 int i; 1538 1539 if (WARN_ON(!shadow_vmcs)) 1540 return; 1541 1542 preempt_disable(); 1543 1544 vmcs_load(shadow_vmcs); 1545 1546 for (i = 0; i < max_shadow_read_write_fields; i++) { 1547 field = shadow_read_write_fields[i]; 1548 val = __vmcs_readl(field.encoding); 1549 vmcs12_write_any(vmcs12, field.encoding, field.offset, val); 1550 } 1551 1552 vmcs_clear(shadow_vmcs); 1553 vmcs_load(vmx->loaded_vmcs->vmcs); 1554 1555 preempt_enable(); 1556 } 1557 1558 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) 1559 { 1560 const struct shadow_vmcs_field *fields[] = { 1561 shadow_read_write_fields, 1562 shadow_read_only_fields 1563 }; 1564 const int max_fields[] = { 1565 max_shadow_read_write_fields, 1566 max_shadow_read_only_fields 1567 }; 1568 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 1569 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); 1570 struct shadow_vmcs_field field; 1571 unsigned long val; 1572 int i, q; 1573 1574 if (WARN_ON(!shadow_vmcs)) 1575 return; 1576 1577 vmcs_load(shadow_vmcs); 1578 1579 for (q = 0; q < ARRAY_SIZE(fields); q++) { 1580 for (i = 0; i < max_fields[q]; i++) { 1581 field = fields[q][i]; 1582 val = vmcs12_read_any(vmcs12, field.encoding, 1583 field.offset); 1584 __vmcs_writel(field.encoding, val); 1585 } 1586 } 1587 1588 vmcs_clear(shadow_vmcs); 1589 vmcs_load(vmx->loaded_vmcs->vmcs); 1590 } 1591 1592 static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx) 1593 { 1594 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1595 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 1596 1597 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ 1598 vmcs12->tpr_threshold = evmcs->tpr_threshold; 1599 vmcs12->guest_rip = evmcs->guest_rip; 1600 1601 if (unlikely(!(evmcs->hv_clean_fields & 1602 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) { 1603 vmcs12->guest_rsp = evmcs->guest_rsp; 1604 vmcs12->guest_rflags = evmcs->guest_rflags; 1605 vmcs12->guest_interruptibility_info = 1606 evmcs->guest_interruptibility_info; 1607 } 1608 1609 if (unlikely(!(evmcs->hv_clean_fields & 1610 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) { 1611 vmcs12->cpu_based_vm_exec_control = 1612 evmcs->cpu_based_vm_exec_control; 1613 } 1614 1615 if (unlikely(!(evmcs->hv_clean_fields & 1616 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) { 1617 vmcs12->exception_bitmap = evmcs->exception_bitmap; 1618 } 1619 1620 if (unlikely(!(evmcs->hv_clean_fields & 1621 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) { 1622 vmcs12->vm_entry_controls = evmcs->vm_entry_controls; 1623 } 1624 1625 if (unlikely(!(evmcs->hv_clean_fields & 1626 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) { 1627 vmcs12->vm_entry_intr_info_field = 1628 evmcs->vm_entry_intr_info_field; 1629 vmcs12->vm_entry_exception_error_code = 1630 evmcs->vm_entry_exception_error_code; 1631 vmcs12->vm_entry_instruction_len = 1632 evmcs->vm_entry_instruction_len; 1633 } 1634 1635 if (unlikely(!(evmcs->hv_clean_fields & 
1636 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) { 1637 vmcs12->host_ia32_pat = evmcs->host_ia32_pat; 1638 vmcs12->host_ia32_efer = evmcs->host_ia32_efer; 1639 vmcs12->host_cr0 = evmcs->host_cr0; 1640 vmcs12->host_cr3 = evmcs->host_cr3; 1641 vmcs12->host_cr4 = evmcs->host_cr4; 1642 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp; 1643 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip; 1644 vmcs12->host_rip = evmcs->host_rip; 1645 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs; 1646 vmcs12->host_es_selector = evmcs->host_es_selector; 1647 vmcs12->host_cs_selector = evmcs->host_cs_selector; 1648 vmcs12->host_ss_selector = evmcs->host_ss_selector; 1649 vmcs12->host_ds_selector = evmcs->host_ds_selector; 1650 vmcs12->host_fs_selector = evmcs->host_fs_selector; 1651 vmcs12->host_gs_selector = evmcs->host_gs_selector; 1652 vmcs12->host_tr_selector = evmcs->host_tr_selector; 1653 } 1654 1655 if (unlikely(!(evmcs->hv_clean_fields & 1656 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) { 1657 vmcs12->pin_based_vm_exec_control = 1658 evmcs->pin_based_vm_exec_control; 1659 vmcs12->vm_exit_controls = evmcs->vm_exit_controls; 1660 vmcs12->secondary_vm_exec_control = 1661 evmcs->secondary_vm_exec_control; 1662 } 1663 1664 if (unlikely(!(evmcs->hv_clean_fields & 1665 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) { 1666 vmcs12->io_bitmap_a = evmcs->io_bitmap_a; 1667 vmcs12->io_bitmap_b = evmcs->io_bitmap_b; 1668 } 1669 1670 if (unlikely(!(evmcs->hv_clean_fields & 1671 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) { 1672 vmcs12->msr_bitmap = evmcs->msr_bitmap; 1673 } 1674 1675 if (unlikely(!(evmcs->hv_clean_fields & 1676 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) { 1677 vmcs12->guest_es_base = evmcs->guest_es_base; 1678 vmcs12->guest_cs_base = evmcs->guest_cs_base; 1679 vmcs12->guest_ss_base = evmcs->guest_ss_base; 1680 vmcs12->guest_ds_base = evmcs->guest_ds_base; 1681 vmcs12->guest_fs_base = evmcs->guest_fs_base; 1682 vmcs12->guest_gs_base = evmcs->guest_gs_base; 1683 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base; 1684 vmcs12->guest_tr_base = evmcs->guest_tr_base; 1685 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base; 1686 vmcs12->guest_idtr_base = evmcs->guest_idtr_base; 1687 vmcs12->guest_es_limit = evmcs->guest_es_limit; 1688 vmcs12->guest_cs_limit = evmcs->guest_cs_limit; 1689 vmcs12->guest_ss_limit = evmcs->guest_ss_limit; 1690 vmcs12->guest_ds_limit = evmcs->guest_ds_limit; 1691 vmcs12->guest_fs_limit = evmcs->guest_fs_limit; 1692 vmcs12->guest_gs_limit = evmcs->guest_gs_limit; 1693 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit; 1694 vmcs12->guest_tr_limit = evmcs->guest_tr_limit; 1695 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit; 1696 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit; 1697 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes; 1698 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; 1699 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; 1700 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; 1701 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; 1702 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; 1703 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; 1704 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; 1705 vmcs12->guest_es_selector = evmcs->guest_es_selector; 1706 vmcs12->guest_cs_selector = evmcs->guest_cs_selector; 1707 vmcs12->guest_ss_selector = evmcs->guest_ss_selector; 1708 vmcs12->guest_ds_selector = evmcs->guest_ds_selector; 1709 vmcs12->guest_fs_selector = evmcs->guest_fs_selector; 
1710 vmcs12->guest_gs_selector = evmcs->guest_gs_selector; 1711 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; 1712 vmcs12->guest_tr_selector = evmcs->guest_tr_selector; 1713 } 1714 1715 if (unlikely(!(evmcs->hv_clean_fields & 1716 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) { 1717 vmcs12->tsc_offset = evmcs->tsc_offset; 1718 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr; 1719 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; 1720 } 1721 1722 if (unlikely(!(evmcs->hv_clean_fields & 1723 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) { 1724 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; 1725 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; 1726 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; 1727 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; 1728 vmcs12->guest_cr0 = evmcs->guest_cr0; 1729 vmcs12->guest_cr3 = evmcs->guest_cr3; 1730 vmcs12->guest_cr4 = evmcs->guest_cr4; 1731 vmcs12->guest_dr7 = evmcs->guest_dr7; 1732 } 1733 1734 if (unlikely(!(evmcs->hv_clean_fields & 1735 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) { 1736 vmcs12->host_fs_base = evmcs->host_fs_base; 1737 vmcs12->host_gs_base = evmcs->host_gs_base; 1738 vmcs12->host_tr_base = evmcs->host_tr_base; 1739 vmcs12->host_gdtr_base = evmcs->host_gdtr_base; 1740 vmcs12->host_idtr_base = evmcs->host_idtr_base; 1741 vmcs12->host_rsp = evmcs->host_rsp; 1742 } 1743 1744 if (unlikely(!(evmcs->hv_clean_fields & 1745 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) { 1746 vmcs12->ept_pointer = evmcs->ept_pointer; 1747 vmcs12->virtual_processor_id = evmcs->virtual_processor_id; 1748 } 1749 1750 if (unlikely(!(evmcs->hv_clean_fields & 1751 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) { 1752 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; 1753 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; 1754 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; 1755 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; 1756 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; 1757 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; 1758 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; 1759 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; 1760 vmcs12->guest_pending_dbg_exceptions = 1761 evmcs->guest_pending_dbg_exceptions; 1762 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; 1763 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; 1764 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; 1765 vmcs12->guest_activity_state = evmcs->guest_activity_state; 1766 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; 1767 } 1768 1769 /* 1770 * Not used? 
1771 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; 1772 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; 1773 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; 1774 * vmcs12->page_fault_error_code_mask = 1775 * evmcs->page_fault_error_code_mask; 1776 * vmcs12->page_fault_error_code_match = 1777 * evmcs->page_fault_error_code_match; 1778 * vmcs12->cr3_target_count = evmcs->cr3_target_count; 1779 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; 1780 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; 1781 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; 1782 */ 1783 1784 /* 1785 * Read only fields: 1786 * vmcs12->guest_physical_address = evmcs->guest_physical_address; 1787 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; 1788 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; 1789 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; 1790 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; 1791 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; 1792 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; 1793 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; 1794 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; 1795 * vmcs12->exit_qualification = evmcs->exit_qualification; 1796 * vmcs12->guest_linear_address = evmcs->guest_linear_address; 1797 * 1798 * Not present in struct vmcs12: 1799 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; 1800 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; 1801 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; 1802 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; 1803 */ 1804 1805 return 0; 1806 } 1807 1808 static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) 1809 { 1810 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1811 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 1812 1813 /* 1814 * Should not be changed by KVM: 1815 * 1816 * evmcs->host_es_selector = vmcs12->host_es_selector; 1817 * evmcs->host_cs_selector = vmcs12->host_cs_selector; 1818 * evmcs->host_ss_selector = vmcs12->host_ss_selector; 1819 * evmcs->host_ds_selector = vmcs12->host_ds_selector; 1820 * evmcs->host_fs_selector = vmcs12->host_fs_selector; 1821 * evmcs->host_gs_selector = vmcs12->host_gs_selector; 1822 * evmcs->host_tr_selector = vmcs12->host_tr_selector; 1823 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; 1824 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; 1825 * evmcs->host_cr0 = vmcs12->host_cr0; 1826 * evmcs->host_cr3 = vmcs12->host_cr3; 1827 * evmcs->host_cr4 = vmcs12->host_cr4; 1828 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; 1829 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; 1830 * evmcs->host_rip = vmcs12->host_rip; 1831 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; 1832 * evmcs->host_fs_base = vmcs12->host_fs_base; 1833 * evmcs->host_gs_base = vmcs12->host_gs_base; 1834 * evmcs->host_tr_base = vmcs12->host_tr_base; 1835 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; 1836 * evmcs->host_idtr_base = vmcs12->host_idtr_base; 1837 * evmcs->host_rsp = vmcs12->host_rsp; 1838 * sync_vmcs02_to_vmcs12() doesn't read these: 1839 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; 1840 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; 1841 * evmcs->msr_bitmap = vmcs12->msr_bitmap; 1842 * evmcs->ept_pointer = vmcs12->ept_pointer; 1843 * 
evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; 1844 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; 1845 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; 1846 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; 1847 * evmcs->tpr_threshold = vmcs12->tpr_threshold; 1848 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; 1849 * evmcs->exception_bitmap = vmcs12->exception_bitmap; 1850 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; 1851 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; 1852 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; 1853 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; 1854 * evmcs->page_fault_error_code_mask = 1855 * vmcs12->page_fault_error_code_mask; 1856 * evmcs->page_fault_error_code_match = 1857 * vmcs12->page_fault_error_code_match; 1858 * evmcs->cr3_target_count = vmcs12->cr3_target_count; 1859 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; 1860 * evmcs->tsc_offset = vmcs12->tsc_offset; 1861 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; 1862 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; 1863 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; 1864 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; 1865 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; 1866 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; 1867 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; 1868 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; 1869 * 1870 * Not present in struct vmcs12: 1871 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; 1872 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; 1873 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; 1874 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; 1875 */ 1876 1877 evmcs->guest_es_selector = vmcs12->guest_es_selector; 1878 evmcs->guest_cs_selector = vmcs12->guest_cs_selector; 1879 evmcs->guest_ss_selector = vmcs12->guest_ss_selector; 1880 evmcs->guest_ds_selector = vmcs12->guest_ds_selector; 1881 evmcs->guest_fs_selector = vmcs12->guest_fs_selector; 1882 evmcs->guest_gs_selector = vmcs12->guest_gs_selector; 1883 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; 1884 evmcs->guest_tr_selector = vmcs12->guest_tr_selector; 1885 1886 evmcs->guest_es_limit = vmcs12->guest_es_limit; 1887 evmcs->guest_cs_limit = vmcs12->guest_cs_limit; 1888 evmcs->guest_ss_limit = vmcs12->guest_ss_limit; 1889 evmcs->guest_ds_limit = vmcs12->guest_ds_limit; 1890 evmcs->guest_fs_limit = vmcs12->guest_fs_limit; 1891 evmcs->guest_gs_limit = vmcs12->guest_gs_limit; 1892 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; 1893 evmcs->guest_tr_limit = vmcs12->guest_tr_limit; 1894 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; 1895 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; 1896 1897 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; 1898 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; 1899 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; 1900 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; 1901 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; 1902 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; 1903 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; 1904 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; 1905 1906 evmcs->guest_es_base = vmcs12->guest_es_base; 1907 evmcs->guest_cs_base = vmcs12->guest_cs_base; 
1908 evmcs->guest_ss_base = vmcs12->guest_ss_base; 1909 evmcs->guest_ds_base = vmcs12->guest_ds_base; 1910 evmcs->guest_fs_base = vmcs12->guest_fs_base; 1911 evmcs->guest_gs_base = vmcs12->guest_gs_base; 1912 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; 1913 evmcs->guest_tr_base = vmcs12->guest_tr_base; 1914 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; 1915 evmcs->guest_idtr_base = vmcs12->guest_idtr_base; 1916 1917 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; 1918 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; 1919 1920 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; 1921 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; 1922 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; 1923 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; 1924 1925 evmcs->guest_pending_dbg_exceptions = 1926 vmcs12->guest_pending_dbg_exceptions; 1927 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; 1928 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; 1929 1930 evmcs->guest_activity_state = vmcs12->guest_activity_state; 1931 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; 1932 1933 evmcs->guest_cr0 = vmcs12->guest_cr0; 1934 evmcs->guest_cr3 = vmcs12->guest_cr3; 1935 evmcs->guest_cr4 = vmcs12->guest_cr4; 1936 evmcs->guest_dr7 = vmcs12->guest_dr7; 1937 1938 evmcs->guest_physical_address = vmcs12->guest_physical_address; 1939 1940 evmcs->vm_instruction_error = vmcs12->vm_instruction_error; 1941 evmcs->vm_exit_reason = vmcs12->vm_exit_reason; 1942 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; 1943 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; 1944 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; 1945 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; 1946 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; 1947 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; 1948 1949 evmcs->exit_qualification = vmcs12->exit_qualification; 1950 1951 evmcs->guest_linear_address = vmcs12->guest_linear_address; 1952 evmcs->guest_rsp = vmcs12->guest_rsp; 1953 evmcs->guest_rflags = vmcs12->guest_rflags; 1954 1955 evmcs->guest_interruptibility_info = 1956 vmcs12->guest_interruptibility_info; 1957 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; 1958 evmcs->vm_entry_controls = vmcs12->vm_entry_controls; 1959 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; 1960 evmcs->vm_entry_exception_error_code = 1961 vmcs12->vm_entry_exception_error_code; 1962 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; 1963 1964 evmcs->guest_rip = vmcs12->guest_rip; 1965 1966 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; 1967 1968 return 0; 1969 } 1970 1971 /* 1972 * This is an equivalent of the nested hypervisor executing the vmptrld 1973 * instruction. 
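 *
 * Unlike a real VMPTRLD there is no memory operand; the eVMCS GPA is
 * taken from the Hyper-V VP assist page via nested_enlightened_vmentry()
 * and, when it changes, the backing page is (re)mapped with kvm_vcpu_map().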
1974 */
1975 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
1976 struct kvm_vcpu *vcpu, bool from_launch)
1977 {
1978 struct vcpu_vmx *vmx = to_vmx(vcpu);
1979 bool evmcs_gpa_changed = false;
1980 u64 evmcs_gpa;
1981
1982 if (likely(!vmx->nested.enlightened_vmcs_enabled))
1983 return EVMPTRLD_DISABLED;
1984
1985 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
1986 return EVMPTRLD_DISABLED;
1987
1988 if (unlikely(!vmx->nested.hv_evmcs ||
1989 evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
1990 if (!vmx->nested.hv_evmcs)
1991 vmx->nested.current_vmptr = -1ull;
1992
1993 nested_release_evmcs(vcpu);
1994
1995 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
1996 &vmx->nested.hv_evmcs_map))
1997 return EVMPTRLD_ERROR;
1998
1999 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
2000
2001 /*
2002 * Currently, KVM only supports eVMCS version 1
2003 * (== KVM_EVMCS_VERSION) and thus expects the guest to set the
2004 * first u32 field of the eVMCS, which specifies the eVMCS
2005 * VersionNumber, to this value.
2006 *
2007 * The guest should learn which eVMCS versions the host supports
2008 * by examining CPUID.0x4000000A.EAX[0:15]. The host userspace
2009 * VMM is expected to set this CPUID leaf according to the value
2010 * returned in vmcs_version from nested_enable_evmcs().
2011 *
2012 * However, it turns out that Microsoft Hyper-V fails to comply
2013 * with its own invented interface: when Hyper-V uses eVMCS, it
2014 * sets the first u32 field of the eVMCS to the revision_id
2015 * specified in MSR_IA32_VMX_BASIC instead of to one of the
2016 * supported eVMCS version numbers specified in
2017 * CPUID.0x4000000A.EAX[0:15].
2018 *
2019 * To work around this Hyper-V bug, accept either a supported
2020 * eVMCS version or the VMCS12 revision_id as valid values for
2021 * the first u32 field of the eVMCS.
2022 */
2023 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2024 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2025 nested_release_evmcs(vcpu);
2026 return EVMPTRLD_VMFAIL;
2027 }
2028
2029 vmx->nested.dirty_vmcs12 = true;
2030 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2031
2032 evmcs_gpa_changed = true;
2033 /*
2034 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
2035 * reloaded from the guest's memory (read-only fields, fields not
2036 * present in struct hv_enlightened_vmcs, ...). Make sure there
2037 * are no leftovers.
2038 */
2039 if (from_launch) {
2040 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2041 memset(vmcs12, 0, sizeof(*vmcs12));
2042 vmcs12->hdr.revision_id = VMCS12_REVISION;
2043 }
2044
2045 }
2046
2047 /*
2048 * Clean fields data can't be used on VMLAUNCH and when we switch
2049 * between different L2 guests as KVM keeps a single VMCS12 per L1.
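 * In either case the cached vmcs12 may no longer match what the
 * hv_clean_fields bits claim is unchanged, so clear all the clean bits
 * and force a full copy_enlightened_to_vmcs12() on the next sync.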
2050 */ 2051 if (from_launch || evmcs_gpa_changed) 2052 vmx->nested.hv_evmcs->hv_clean_fields &= 2053 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2054 2055 return EVMPTRLD_SUCCEEDED; 2056 } 2057 2058 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu) 2059 { 2060 struct vcpu_vmx *vmx = to_vmx(vcpu); 2061 2062 if (vmx->nested.hv_evmcs) { 2063 copy_vmcs12_to_enlightened(vmx); 2064 /* All fields are clean */ 2065 vmx->nested.hv_evmcs->hv_clean_fields |= 2066 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2067 } else { 2068 copy_vmcs12_to_shadow(vmx); 2069 } 2070 2071 vmx->nested.need_vmcs12_to_shadow_sync = false; 2072 } 2073 2074 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) 2075 { 2076 struct vcpu_vmx *vmx = 2077 container_of(timer, struct vcpu_vmx, nested.preemption_timer); 2078 2079 vmx->nested.preemption_timer_expired = true; 2080 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); 2081 kvm_vcpu_kick(&vmx->vcpu); 2082 2083 return HRTIMER_NORESTART; 2084 } 2085 2086 static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu) 2087 { 2088 struct vcpu_vmx *vmx = to_vmx(vcpu); 2089 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2090 2091 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >> 2092 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2093 2094 if (!vmx->nested.has_preemption_timer_deadline) { 2095 vmx->nested.preemption_timer_deadline = 2096 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc; 2097 vmx->nested.has_preemption_timer_deadline = true; 2098 } 2099 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; 2100 } 2101 2102 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu, 2103 u64 preemption_timeout) 2104 { 2105 struct vcpu_vmx *vmx = to_vmx(vcpu); 2106 2107 /* 2108 * A timer value of zero is architecturally guaranteed to cause 2109 * a VMExit prior to executing any instructions in the guest. 2110 */ 2111 if (preemption_timeout == 0) { 2112 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); 2113 return; 2114 } 2115 2116 if (vcpu->arch.virtual_tsc_khz == 0) 2117 return; 2118 2119 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2120 preemption_timeout *= 1000000; 2121 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); 2122 hrtimer_start(&vmx->nested.preemption_timer, 2123 ktime_add_ns(ktime_get(), preemption_timeout), 2124 HRTIMER_MODE_ABS_PINNED); 2125 } 2126 2127 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2128 { 2129 if (vmx->nested.nested_run_pending && 2130 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) 2131 return vmcs12->guest_ia32_efer; 2132 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) 2133 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); 2134 else 2135 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); 2136 } 2137 2138 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) 2139 { 2140 /* 2141 * If vmcs02 hasn't been initialized, set the constant vmcs02 state 2142 * according to L0's settings (vmcs12 is irrelevant here). Host 2143 * fields that come from L0 and are not constant, e.g. HOST_CR3, 2144 * will be set as needed prior to VMLAUNCH/VMRESUME. 2145 */ 2146 if (vmx->nested.vmcs02_initialized) 2147 return; 2148 vmx->nested.vmcs02_initialized = true; 2149 2150 /* 2151 * We don't care what the EPTP value is we just need to guarantee 2152 * it's valid so we don't get a false positive when doing early 2153 * consistency checks. 
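 * The EPTP written below is only a placeholder built by construct_eptp();
 * the real EPTP for L2 is installed later, when KVM loads the MMU root
 * for the nested guest.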
2154 */ 2155 if (enable_ept && nested_early_check) 2156 vmcs_write64(EPT_POINTER, 2157 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); 2158 2159 /* All VMFUNCs are currently emulated through L0 vmexits. */ 2160 if (cpu_has_vmx_vmfunc()) 2161 vmcs_write64(VM_FUNCTION_CONTROL, 0); 2162 2163 if (cpu_has_vmx_posted_intr()) 2164 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); 2165 2166 if (cpu_has_vmx_msr_bitmap()) 2167 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); 2168 2169 /* 2170 * PML is emulated for L2, but never enabled in hardware as the MMU 2171 * handles A/D emulation. Disabling PML for L2 also avoids having to 2172 * deal with filtering out L2 GPAs from the buffer. 2173 */ 2174 if (enable_pml) { 2175 vmcs_write64(PML_ADDRESS, 0); 2176 vmcs_write16(GUEST_PML_INDEX, -1); 2177 } 2178 2179 if (cpu_has_vmx_encls_vmexit()) 2180 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); 2181 2182 /* 2183 * Set the MSR load/store lists to match L0's settings. Only the 2184 * addresses are constant (for vmcs02), the counts can change based 2185 * on L2's behavior, e.g. switching to/from long mode. 2186 */ 2187 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); 2188 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 2189 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 2190 2191 vmx_set_constant_host_state(vmx); 2192 } 2193 2194 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, 2195 struct vmcs12 *vmcs12) 2196 { 2197 prepare_vmcs02_constant_state(vmx); 2198 2199 vmcs_write64(VMCS_LINK_POINTER, -1ull); 2200 2201 if (enable_vpid) { 2202 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) 2203 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); 2204 else 2205 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 2206 } 2207 } 2208 2209 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2210 { 2211 u32 exec_control; 2212 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); 2213 2214 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) 2215 prepare_vmcs02_early_rare(vmx, vmcs12); 2216 2217 /* 2218 * PIN CONTROLS 2219 */ 2220 exec_control = vmx_pin_based_exec_ctrl(vmx); 2221 exec_control |= (vmcs12->pin_based_vm_exec_control & 2222 ~PIN_BASED_VMX_PREEMPTION_TIMER); 2223 2224 /* Posted interrupts setting is only taken from vmcs12. */ 2225 if (nested_cpu_has_posted_intr(vmcs12)) { 2226 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; 2227 vmx->nested.pi_pending = false; 2228 } else { 2229 exec_control &= ~PIN_BASED_POSTED_INTR; 2230 } 2231 pin_controls_set(vmx, exec_control); 2232 2233 /* 2234 * EXEC CONTROLS 2235 */ 2236 exec_control = vmx_exec_control(vmx); /* L0's desires */ 2237 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING; 2238 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING; 2239 exec_control &= ~CPU_BASED_TPR_SHADOW; 2240 exec_control |= vmcs12->cpu_based_vm_exec_control; 2241 2242 vmx->nested.l1_tpr_threshold = -1; 2243 if (exec_control & CPU_BASED_TPR_SHADOW) 2244 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 2245 #ifdef CONFIG_X86_64 2246 else 2247 exec_control |= CPU_BASED_CR8_LOAD_EXITING | 2248 CPU_BASED_CR8_STORE_EXITING; 2249 #endif 2250 2251 /* 2252 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed 2253 * for I/O port accesses. 
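 * To that end, run L2 with "unconditional I/O exiting" set and the I/O
 * bitmaps disabled; on an exit, KVM either reflects it to L1 (if L1 asked
 * to intercept the port) or emulates the access itself.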
2254 */ 2255 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 2256 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 2257 2258 /* 2259 * This bit will be computed in nested_get_vmcs12_pages, because 2260 * we do not have access to L1's MSR bitmap yet. For now, keep 2261 * the same bit as before, hoping to avoid multiple VMWRITEs that 2262 * only set/clear this bit. 2263 */ 2264 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; 2265 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; 2266 2267 exec_controls_set(vmx, exec_control); 2268 2269 /* 2270 * SECONDARY EXEC CONTROLS 2271 */ 2272 if (cpu_has_secondary_exec_ctrls()) { 2273 exec_control = vmx->secondary_exec_control; 2274 2275 /* Take the following fields only from vmcs12 */ 2276 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2277 SECONDARY_EXEC_ENABLE_INVPCID | 2278 SECONDARY_EXEC_ENABLE_RDTSCP | 2279 SECONDARY_EXEC_XSAVES | 2280 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2281 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2282 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2283 SECONDARY_EXEC_ENABLE_VMFUNC); 2284 if (nested_cpu_has(vmcs12, 2285 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) 2286 exec_control |= vmcs12->secondary_vm_exec_control; 2287 2288 /* PML is emulated and never enabled in hardware for L2. */ 2289 exec_control &= ~SECONDARY_EXEC_ENABLE_PML; 2290 2291 /* VMCS shadowing for L2 is emulated for now */ 2292 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 2293 2294 /* 2295 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4() 2296 * will not have to rewrite the controls just for this bit. 2297 */ 2298 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() && 2299 (vmcs12->guest_cr4 & X86_CR4_UMIP)) 2300 exec_control |= SECONDARY_EXEC_DESC; 2301 2302 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 2303 vmcs_write16(GUEST_INTR_STATUS, 2304 vmcs12->guest_intr_status); 2305 2306 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) 2307 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; 2308 2309 secondary_exec_controls_set(vmx, exec_control); 2310 } 2311 2312 /* 2313 * ENTRY CONTROLS 2314 * 2315 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE 2316 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate 2317 * on the related bits (if supported by the CPU) in the hope that 2318 * we can avoid VMWrites during vmx_set_efer(). 2319 */ 2320 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) & 2321 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; 2322 if (cpu_has_load_ia32_efer()) { 2323 if (guest_efer & EFER_LMA) 2324 exec_control |= VM_ENTRY_IA32E_MODE; 2325 if (guest_efer != host_efer) 2326 exec_control |= VM_ENTRY_LOAD_IA32_EFER; 2327 } 2328 vm_entry_controls_set(vmx, exec_control); 2329 2330 /* 2331 * EXIT CONTROLS 2332 * 2333 * L2->L1 exit controls are emulated - the hardware exit is to L0 so 2334 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER 2335 * bits may be modified by vmx_set_efer() in prepare_vmcs02(). 
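 * vmcs12's exit controls are therefore only consulted in software when
 * KVM emulates the L2->L1 VM-exit; they are never programmed into vmcs02.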
2336 */ 2337 exec_control = vmx_vmexit_ctrl(); 2338 if (cpu_has_load_ia32_efer() && guest_efer != host_efer) 2339 exec_control |= VM_EXIT_LOAD_IA32_EFER; 2340 vm_exit_controls_set(vmx, exec_control); 2341 2342 /* 2343 * Interrupt/Exception Fields 2344 */ 2345 if (vmx->nested.nested_run_pending) { 2346 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2347 vmcs12->vm_entry_intr_info_field); 2348 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 2349 vmcs12->vm_entry_exception_error_code); 2350 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2351 vmcs12->vm_entry_instruction_len); 2352 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 2353 vmcs12->guest_interruptibility_info); 2354 vmx->loaded_vmcs->nmi_known_unmasked = 2355 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); 2356 } else { 2357 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 2358 } 2359 } 2360 2361 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2362 { 2363 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2364 2365 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2366 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2367 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 2368 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 2369 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); 2370 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); 2371 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); 2372 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); 2373 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); 2374 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); 2375 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); 2376 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); 2377 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); 2378 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); 2379 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); 2380 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); 2381 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); 2382 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); 2383 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); 2384 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); 2385 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); 2386 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); 2387 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); 2388 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); 2389 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); 2390 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); 2391 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); 2392 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); 2393 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); 2394 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); 2395 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); 2396 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); 2397 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); 2398 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); 2399 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); 2400 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); 2401 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); 2402 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); 2403 2404 vmx->segment_cache.bitmask = 0; 2405 } 2406 2407 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2408 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { 2409 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 2410 
vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 2411 vmcs12->guest_pending_dbg_exceptions); 2412 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); 2413 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); 2414 2415 /* 2416 * L1 may access the L2's PDPTR, so save them to construct 2417 * vmcs12 2418 */ 2419 if (enable_ept) { 2420 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2421 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2422 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2423 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2424 } 2425 2426 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && 2427 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 2428 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); 2429 } 2430 2431 if (nested_cpu_has_xsaves(vmcs12)) 2432 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); 2433 2434 /* 2435 * Whether page-faults are trapped is determined by a combination of 2436 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0 2437 * doesn't care about page faults then we should set all of these to 2438 * L1's desires. However, if L0 does care about (some) page faults, it 2439 * is not easy (if at all possible?) to merge L0 and L1's desires, we 2440 * simply ask to exit on each and every L2 page fault. This is done by 2441 * setting MASK=MATCH=0 and (see below) EB.PF=1. 2442 * Note that below we don't need special code to set EB.PF beyond the 2443 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, 2444 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when 2445 * !enable_ept, EB.PF is 1, so the "or" will always be 1. 2446 */ 2447 if (vmx_need_pf_intercept(&vmx->vcpu)) { 2448 /* 2449 * TODO: if both L0 and L1 need the same MASK and MATCH, 2450 * go ahead and use it? 2451 */ 2452 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 2453 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 2454 } else { 2455 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask); 2456 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match); 2457 } 2458 2459 if (cpu_has_vmx_apicv()) { 2460 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); 2461 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); 2462 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); 2463 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); 2464 } 2465 2466 /* 2467 * Make sure the msr_autostore list is up to date before we set the 2468 * count in the vmcs02. 2469 */ 2470 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); 2471 2472 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); 2473 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 2474 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 2475 2476 set_cr4_guest_host_mask(vmx); 2477 } 2478 2479 /* 2480 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested 2481 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it 2482 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 2483 * guest in a way that will both be appropriate to L1's requests, and our 2484 * needs. In addition to modifying the active vmcs (which is vmcs02), this 2485 * function also has additional necessary side-effects, like setting various 2486 * vcpu->arch fields. 2487 * Returns 0 on success, 1 on failure. Invalid state exit qualification code 2488 * is assigned to entry_failure_code on failure. 
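 *
 * Rarely-written fields are handled by prepare_vmcs02_rare(), which is
 * only called when vmcs12 has been dirtied or an enlightened VMCS is in
 * use, sparing a pile of VMWRITEs on the common VMRESUME path.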
2489 */
2490 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2491 enum vm_entry_failure_code *entry_failure_code)
2492 {
2493 struct vcpu_vmx *vmx = to_vmx(vcpu);
2494 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2495 bool load_guest_pdptrs_vmcs12 = false;
2496
2497 if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
2498 prepare_vmcs02_rare(vmx, vmcs12);
2499 vmx->nested.dirty_vmcs12 = false;
2500
2501 load_guest_pdptrs_vmcs12 = !hv_evmcs ||
2502 !(hv_evmcs->hv_clean_fields &
2503 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
2504 }
2505
2506 if (vmx->nested.nested_run_pending &&
2507 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2508 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2509 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2510 } else {
2511 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2512 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2513 }
2514 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2515 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2516 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
2517 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2518
2519 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2520 * bitwise-or of what L1 wants to trap for L2, and what we want to
2521 * trap. Note that CR0.TS also needs updating - we do this later.
2522 */
2523 vmx_update_exception_bitmap(vcpu);
2524 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2525 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2526
2527 if (vmx->nested.nested_run_pending &&
2528 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2529 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2530 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2531 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2532 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2533 }
2534
2535 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2536
2537 if (kvm_has_tsc_control)
2538 decache_tsc_multiplier(vmx);
2539
2540 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
2541
2542 if (nested_cpu_has_ept(vmcs12))
2543 nested_ept_init_mmu_context(vcpu);
2544
2545 /*
2546 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2547 * bits which we consider mandatory enabled.
2548 * The CR0_READ_SHADOW is what L2 should have expected to read given
2549 * the specifications by L1; it's not enough to take
2550 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask can have
2551 * more bits set than L1 expected.
2552 */
2553 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2554 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2555
2556 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2557 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2558
2559 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2560 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2561 vmx_set_efer(vcpu, vcpu->arch.efer);
2562
2563 /*
2564 * If guest state is invalid and unrestricted guest is disabled,
2565 * it means L1 attempted VMEntry to L2 with invalid state.
2566 * Fail the VMEntry.
2567 */
2568 if (CC(!vmx_guest_state_valid(vcpu))) {
2569 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2570 return -EINVAL;
2571 }
2572
2573 /* Load the guest's CR3, shadowed by either EPT or shadow page tables. */
2574 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2575 entry_failure_code))
2576 return -EINVAL;
2577
2578 /*
2579 * Immediately write vmcs02.GUEST_CR3.
It will be propagated to vmcs12 2580 * on nested VM-Exit, which can occur without actually running L2 and 2581 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with 2582 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the 2583 * transition to HLT instead of running L2. 2584 */ 2585 if (enable_ept) 2586 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3); 2587 2588 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */ 2589 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) && 2590 is_pae_paging(vcpu)) { 2591 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2592 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2593 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2594 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2595 } 2596 2597 if (!enable_ept) 2598 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; 2599 2600 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 2601 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 2602 vmcs12->guest_ia32_perf_global_ctrl))) 2603 return -EINVAL; 2604 2605 kvm_rsp_write(vcpu, vmcs12->guest_rsp); 2606 kvm_rip_write(vcpu, vmcs12->guest_rip); 2607 return 0; 2608 } 2609 2610 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) 2611 { 2612 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) && 2613 nested_cpu_has_virtual_nmis(vmcs12))) 2614 return -EINVAL; 2615 2616 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) && 2617 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING))) 2618 return -EINVAL; 2619 2620 return 0; 2621 } 2622 2623 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) 2624 { 2625 struct vcpu_vmx *vmx = to_vmx(vcpu); 2626 2627 /* Check for memory type validity */ 2628 switch (new_eptp & VMX_EPTP_MT_MASK) { 2629 case VMX_EPTP_MT_UC: 2630 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) 2631 return false; 2632 break; 2633 case VMX_EPTP_MT_WB: 2634 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) 2635 return false; 2636 break; 2637 default: 2638 return false; 2639 } 2640 2641 /* Page-walk levels validity. 
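 * Only 4-level and 5-level page walks are recognized; an EPTP that
 * encodes any other walk length fails the check below.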
*/ 2642 switch (new_eptp & VMX_EPTP_PWL_MASK) { 2643 case VMX_EPTP_PWL_5: 2644 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) 2645 return false; 2646 break; 2647 case VMX_EPTP_PWL_4: 2648 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) 2649 return false; 2650 break; 2651 default: 2652 return false; 2653 } 2654 2655 /* Reserved bits should not be set */ 2656 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) 2657 return false; 2658 2659 /* AD, if set, should be supported */ 2660 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) { 2661 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) 2662 return false; 2663 } 2664 2665 return true; 2666 } 2667 2668 /* 2669 * Checks related to VM-Execution Control Fields 2670 */ 2671 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, 2672 struct vmcs12 *vmcs12) 2673 { 2674 struct vcpu_vmx *vmx = to_vmx(vcpu); 2675 2676 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control, 2677 vmx->nested.msrs.pinbased_ctls_low, 2678 vmx->nested.msrs.pinbased_ctls_high)) || 2679 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, 2680 vmx->nested.msrs.procbased_ctls_low, 2681 vmx->nested.msrs.procbased_ctls_high))) 2682 return -EINVAL; 2683 2684 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && 2685 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control, 2686 vmx->nested.msrs.secondary_ctls_low, 2687 vmx->nested.msrs.secondary_ctls_high))) 2688 return -EINVAL; 2689 2690 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) || 2691 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || 2692 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || 2693 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || 2694 nested_vmx_check_apic_access_controls(vcpu, vmcs12) || 2695 nested_vmx_check_apicv_controls(vcpu, vmcs12) || 2696 nested_vmx_check_nmi_controls(vmcs12) || 2697 nested_vmx_check_pml_controls(vcpu, vmcs12) || 2698 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || 2699 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || 2700 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || 2701 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2702 return -EINVAL; 2703 2704 if (!nested_cpu_has_preemption_timer(vmcs12) && 2705 nested_cpu_has_save_preemption_timer(vmcs12)) 2706 return -EINVAL; 2707 2708 if (nested_cpu_has_ept(vmcs12) && 2709 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer))) 2710 return -EINVAL; 2711 2712 if (nested_cpu_has_vmfunc(vmcs12)) { 2713 if (CC(vmcs12->vm_function_control & 2714 ~vmx->nested.msrs.vmfunc_controls)) 2715 return -EINVAL; 2716 2717 if (nested_cpu_has_eptp_switching(vmcs12)) { 2718 if (CC(!nested_cpu_has_ept(vmcs12)) || 2719 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address))) 2720 return -EINVAL; 2721 } 2722 } 2723 2724 return 0; 2725 } 2726 2727 /* 2728 * Checks related to VM-Exit Control Fields 2729 */ 2730 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, 2731 struct vmcs12 *vmcs12) 2732 { 2733 struct vcpu_vmx *vmx = to_vmx(vcpu); 2734 2735 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls, 2736 vmx->nested.msrs.exit_ctls_low, 2737 vmx->nested.msrs.exit_ctls_high)) || 2738 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))) 2739 return -EINVAL; 2740 2741 return 0; 2742 } 2743 2744 /* 2745 * Checks related to VM-Entry Control Fields 2746 */ 2747 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, 2748 struct vmcs12 *vmcs12) 2749 { 2750 struct 
vcpu_vmx *vmx = to_vmx(vcpu); 2751 2752 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls, 2753 vmx->nested.msrs.entry_ctls_low, 2754 vmx->nested.msrs.entry_ctls_high))) 2755 return -EINVAL; 2756 2757 /* 2758 * From the Intel SDM, volume 3: 2759 * Fields relevant to VM-entry event injection must be set properly. 2760 * These fields are the VM-entry interruption-information field, the 2761 * VM-entry exception error code, and the VM-entry instruction length. 2762 */ 2763 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { 2764 u32 intr_info = vmcs12->vm_entry_intr_info_field; 2765 u8 vector = intr_info & INTR_INFO_VECTOR_MASK; 2766 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; 2767 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; 2768 bool should_have_error_code; 2769 bool urg = nested_cpu_has2(vmcs12, 2770 SECONDARY_EXEC_UNRESTRICTED_GUEST); 2771 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; 2772 2773 /* VM-entry interruption-info field: interruption type */ 2774 if (CC(intr_type == INTR_TYPE_RESERVED) || 2775 CC(intr_type == INTR_TYPE_OTHER_EVENT && 2776 !nested_cpu_supports_monitor_trap_flag(vcpu))) 2777 return -EINVAL; 2778 2779 /* VM-entry interruption-info field: vector */ 2780 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || 2781 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || 2782 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) 2783 return -EINVAL; 2784 2785 /* VM-entry interruption-info field: deliver error code */ 2786 should_have_error_code = 2787 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && 2788 x86_exception_has_error_code(vector); 2789 if (CC(has_error_code != should_have_error_code)) 2790 return -EINVAL; 2791 2792 /* VM-entry exception error code */ 2793 if (CC(has_error_code && 2794 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))) 2795 return -EINVAL; 2796 2797 /* VM-entry interruption-info field: reserved bits */ 2798 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK)) 2799 return -EINVAL; 2800 2801 /* VM-entry instruction length */ 2802 switch (intr_type) { 2803 case INTR_TYPE_SOFT_EXCEPTION: 2804 case INTR_TYPE_SOFT_INTR: 2805 case INTR_TYPE_PRIV_SW_EXCEPTION: 2806 if (CC(vmcs12->vm_entry_instruction_len > 15) || 2807 CC(vmcs12->vm_entry_instruction_len == 0 && 2808 CC(!nested_cpu_has_zero_length_injection(vcpu)))) 2809 return -EINVAL; 2810 } 2811 } 2812 2813 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) 2814 return -EINVAL; 2815 2816 return 0; 2817 } 2818 2819 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, 2820 struct vmcs12 *vmcs12) 2821 { 2822 if (nested_check_vm_execution_controls(vcpu, vmcs12) || 2823 nested_check_vm_exit_controls(vcpu, vmcs12) || 2824 nested_check_vm_entry_controls(vcpu, vmcs12)) 2825 return -EINVAL; 2826 2827 if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled) 2828 return nested_evmcs_check_controls(vmcs12); 2829 2830 return 0; 2831 } 2832 2833 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, 2834 struct vmcs12 *vmcs12) 2835 { 2836 bool ia32e; 2837 2838 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || 2839 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || 2840 CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3))) 2841 return -EINVAL; 2842 2843 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || 2844 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))) 2845 return -EINVAL; 2846 2847 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && 2848 
CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) 2849 return -EINVAL; 2850 2851 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && 2852 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 2853 vmcs12->host_ia32_perf_global_ctrl))) 2854 return -EINVAL; 2855 2856 #ifdef CONFIG_X86_64 2857 ia32e = !!(vcpu->arch.efer & EFER_LMA); 2858 #else 2859 ia32e = false; 2860 #endif 2861 2862 if (ia32e) { 2863 if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) || 2864 CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) 2865 return -EINVAL; 2866 } else { 2867 if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) || 2868 CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || 2869 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || 2870 CC((vmcs12->host_rip) >> 32)) 2871 return -EINVAL; 2872 } 2873 2874 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2875 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2876 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2877 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2878 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2879 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2880 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2881 CC(vmcs12->host_cs_selector == 0) || 2882 CC(vmcs12->host_tr_selector == 0) || 2883 CC(vmcs12->host_ss_selector == 0 && !ia32e)) 2884 return -EINVAL; 2885 2886 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) || 2887 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) || 2888 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) || 2889 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) || 2890 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) || 2891 CC(is_noncanonical_address(vmcs12->host_rip, vcpu))) 2892 return -EINVAL; 2893 2894 /* 2895 * If the load IA32_EFER VM-exit control is 1, bits reserved in the 2896 * IA32_EFER MSR must be 0 in the field for that register. In addition, 2897 * the values of the LMA and LME bits in the field must each be that of 2898 * the host address-space size VM-exit control. 
2899 */ 2900 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { 2901 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) || 2902 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) || 2903 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))) 2904 return -EINVAL; 2905 } 2906 2907 return 0; 2908 } 2909 2910 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, 2911 struct vmcs12 *vmcs12) 2912 { 2913 int r = 0; 2914 struct vmcs12 *shadow; 2915 struct kvm_host_map map; 2916 2917 if (vmcs12->vmcs_link_pointer == -1ull) 2918 return 0; 2919 2920 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) 2921 return -EINVAL; 2922 2923 if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))) 2924 return -EINVAL; 2925 2926 shadow = map.hva; 2927 2928 if (CC(shadow->hdr.revision_id != VMCS12_REVISION) || 2929 CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))) 2930 r = -EINVAL; 2931 2932 kvm_vcpu_unmap(vcpu, &map, false); 2933 return r; 2934 } 2935 2936 /* 2937 * Checks related to Guest Non-register State 2938 */ 2939 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) 2940 { 2941 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && 2942 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT && 2943 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI)) 2944 return -EINVAL; 2945 2946 return 0; 2947 } 2948 2949 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, 2950 struct vmcs12 *vmcs12, 2951 enum vm_entry_failure_code *entry_failure_code) 2952 { 2953 bool ia32e; 2954 2955 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2956 2957 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) || 2958 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))) 2959 return -EINVAL; 2960 2961 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && 2962 CC(!kvm_dr7_valid(vmcs12->guest_dr7))) 2963 return -EINVAL; 2964 2965 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && 2966 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) 2967 return -EINVAL; 2968 2969 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { 2970 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR; 2971 return -EINVAL; 2972 } 2973 2974 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 2975 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 2976 vmcs12->guest_ia32_perf_global_ctrl))) 2977 return -EINVAL; 2978 2979 /* 2980 * If the load IA32_EFER VM-entry control is 1, the following checks 2981 * are performed on the field for the IA32_EFER MSR: 2982 * - Bits reserved in the IA32_EFER MSR must be 0. 2983 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of 2984 * the IA-32e mode guest VM-exit control. It must also be identical 2985 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to 2986 * CR0.PG) is 1. 
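 * For example, an entry with the "IA-32e mode guest" control set,
 * guest CR0.PG = 1 and guest EFER.LMA = 1 but EFER.LME = 0 fails the
 * checks below.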
2987 */ 2988 if (to_vmx(vcpu)->nested.nested_run_pending && 2989 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { 2990 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; 2991 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || 2992 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || 2993 CC(((vmcs12->guest_cr0 & X86_CR0_PG) && 2994 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))) 2995 return -EINVAL; 2996 } 2997 2998 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && 2999 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) || 3000 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))) 3001 return -EINVAL; 3002 3003 if (nested_check_guest_non_reg_state(vmcs12)) 3004 return -EINVAL; 3005 3006 return 0; 3007 } 3008 3009 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) 3010 { 3011 struct vcpu_vmx *vmx = to_vmx(vcpu); 3012 unsigned long cr3, cr4; 3013 bool vm_fail; 3014 3015 if (!nested_early_check) 3016 return 0; 3017 3018 if (vmx->msr_autoload.host.nr) 3019 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 3020 if (vmx->msr_autoload.guest.nr) 3021 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 3022 3023 preempt_disable(); 3024 3025 vmx_prepare_switch_to_guest(vcpu); 3026 3027 /* 3028 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, 3029 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to 3030 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. 3031 * there is no need to preserve other bits or save/restore the field. 3032 */ 3033 vmcs_writel(GUEST_RFLAGS, 0); 3034 3035 cr3 = __get_current_cr3_fast(); 3036 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 3037 vmcs_writel(HOST_CR3, cr3); 3038 vmx->loaded_vmcs->host_state.cr3 = cr3; 3039 } 3040 3041 cr4 = cr4_read_shadow(); 3042 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 3043 vmcs_writel(HOST_CR4, cr4); 3044 vmx->loaded_vmcs->host_state.cr4 = cr4; 3045 } 3046 3047 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 3048 vmx->loaded_vmcs->launched); 3049 3050 if (vmx->msr_autoload.host.nr) 3051 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 3052 if (vmx->msr_autoload.guest.nr) 3053 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 3054 3055 if (vm_fail) { 3056 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR); 3057 3058 preempt_enable(); 3059 3060 trace_kvm_nested_vmenter_failed( 3061 "early hardware check VM-instruction error: ", error); 3062 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3063 return 1; 3064 } 3065 3066 /* 3067 * VMExit clears RFLAGS.IF and DR7, even on a consistency check. 3068 */ 3069 if (hw_breakpoint_active()) 3070 set_debugreg(__this_cpu_read(cpu_dr7), 7); 3071 local_irq_enable(); 3072 preempt_enable(); 3073 3074 /* 3075 * A non-failing VMEntry means we somehow entered guest mode with 3076 * an illegal RIP, and that's just the tip of the iceberg. There 3077 * is no telling what memory has been modified or what state has 3078 * been exposed to unknown code. Hitting this all but guarantees 3079 * a (very critical) hardware issue. 3080 */ 3081 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & 3082 VMX_EXIT_REASONS_FAILED_VMENTRY)); 3083 3084 return 0; 3085 } 3086 3087 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu) 3088 { 3089 struct vcpu_vmx *vmx = to_vmx(vcpu); 3090 3091 /* 3092 * hv_evmcs may end up being not mapped after migration (when 3093 * L2 was running), map it here to make sure vmcs12 changes are 3094 * properly reflected. 
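 * The mapping is deferred to KVM_REQ_GET_NESTED_STATE_PAGES processing
 * because guest memory may not be accessible yet at the time userspace
 * sets the nested state.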
3095 */ 3096 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) { 3097 enum nested_evmptrld_status evmptrld_status = 3098 nested_vmx_handle_enlightened_vmptrld(vcpu, false); 3099 3100 if (evmptrld_status == EVMPTRLD_VMFAIL || 3101 evmptrld_status == EVMPTRLD_ERROR) { 3102 pr_debug_ratelimited("%s: enlightened vmptrld failed\n", 3103 __func__); 3104 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3105 vcpu->run->internal.suberror = 3106 KVM_INTERNAL_ERROR_EMULATION; 3107 vcpu->run->internal.ndata = 0; 3108 return false; 3109 } 3110 } 3111 3112 return true; 3113 } 3114 3115 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) 3116 { 3117 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3118 struct vcpu_vmx *vmx = to_vmx(vcpu); 3119 struct kvm_host_map *map; 3120 struct page *page; 3121 u64 hpa; 3122 3123 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3124 /* 3125 * Translate L1 physical address to host physical 3126 * address for vmcs02. Keep the page pinned, so this 3127 * physical address remains valid. We keep a reference 3128 * to it so we can release it later. 3129 */ 3130 if (vmx->nested.apic_access_page) { /* shouldn't happen */ 3131 kvm_release_page_clean(vmx->nested.apic_access_page); 3132 vmx->nested.apic_access_page = NULL; 3133 } 3134 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); 3135 if (!is_error_page(page)) { 3136 vmx->nested.apic_access_page = page; 3137 hpa = page_to_phys(vmx->nested.apic_access_page); 3138 vmcs_write64(APIC_ACCESS_ADDR, hpa); 3139 } else { 3140 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n", 3141 __func__); 3142 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3143 vcpu->run->internal.suberror = 3144 KVM_INTERNAL_ERROR_EMULATION; 3145 vcpu->run->internal.ndata = 0; 3146 return false; 3147 } 3148 } 3149 3150 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3151 map = &vmx->nested.virtual_apic_map; 3152 3153 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { 3154 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); 3155 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) && 3156 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) && 3157 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3158 /* 3159 * The processor will never use the TPR shadow, simply 3160 * clear the bit from the execution control. Such a 3161 * configuration is useless, but it happens in tests. 3162 * For any other configuration, failing the vm entry is 3163 * _not_ what the processor does but it's basically the 3164 * only possibility we have. 3165 */ 3166 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); 3167 } else { 3168 /* 3169 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to 3170 * force VM-Entry to fail. 
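 * -1ull is neither 4KiB aligned nor within the supported physical-address
 * width, so the hardware's VM-entry checks on the virtual-APIC address
 * are guaranteed to reject it.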
3171 */ 3172 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); 3173 } 3174 } 3175 3176 if (nested_cpu_has_posted_intr(vmcs12)) { 3177 map = &vmx->nested.pi_desc_map; 3178 3179 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { 3180 vmx->nested.pi_desc = 3181 (struct pi_desc *)(((void *)map->hva) + 3182 offset_in_page(vmcs12->posted_intr_desc_addr)); 3183 vmcs_write64(POSTED_INTR_DESC_ADDR, 3184 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); 3185 } 3186 } 3187 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) 3188 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3189 else 3190 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3191 3192 return true; 3193 } 3194 3195 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) 3196 { 3197 if (!nested_get_evmcs_page(vcpu)) 3198 return false; 3199 3200 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu)) 3201 return false; 3202 3203 return true; 3204 } 3205 3206 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) 3207 { 3208 struct vmcs12 *vmcs12; 3209 struct vcpu_vmx *vmx = to_vmx(vcpu); 3210 gpa_t dst; 3211 3212 if (WARN_ON_ONCE(!is_guest_mode(vcpu))) 3213 return 0; 3214 3215 if (WARN_ON_ONCE(vmx->nested.pml_full)) 3216 return 1; 3217 3218 /* 3219 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is 3220 * set is already checked as part of A/D emulation. 3221 */ 3222 vmcs12 = get_vmcs12(vcpu); 3223 if (!nested_cpu_has_pml(vmcs12)) 3224 return 0; 3225 3226 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { 3227 vmx->nested.pml_full = true; 3228 return 1; 3229 } 3230 3231 gpa &= ~0xFFFull; 3232 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; 3233 3234 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, 3235 offset_in_page(dst), sizeof(gpa))) 3236 return 0; 3237 3238 vmcs12->guest_pml_index--; 3239 3240 return 0; 3241 } 3242 3243 /* 3244 * Intel's VMX Instruction Reference specifies a common set of prerequisites 3245 * for running VMX instructions (except VMXON, whose prerequisites are 3246 * slightly different). It also specifies what exception to inject otherwise. 3247 * Note that many of these exceptions have priority over VM exits, so they 3248 * don't have to be checked again here. 3249 */ 3250 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) 3251 { 3252 if (!to_vmx(vcpu)->nested.vmxon) { 3253 kvm_queue_exception(vcpu, UD_VECTOR); 3254 return 0; 3255 } 3256 3257 if (vmx_get_cpl(vcpu)) { 3258 kvm_inject_gp(vcpu, 0); 3259 return 0; 3260 } 3261 3262 return 1; 3263 } 3264 3265 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) 3266 { 3267 u8 rvi = vmx_get_rvi(); 3268 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI); 3269 3270 return ((rvi & 0xf0) > (vppr & 0xf0)); 3271 } 3272 3273 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 3274 struct vmcs12 *vmcs12); 3275 3276 /* 3277 * If from_vmentry is false, this is being called from state restore (either RSM 3278 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. 
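 * In the state-restore case the guest-state checks, the early hardware
 * check and the VM-entry MSR loads are skipped (they were already done
 * when the nested state was originally created), and mapping of the
 * vmcs12 pages is deferred via KVM_REQ_GET_NESTED_STATE_PAGES.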
3279 * 3280 * Returns: 3281 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode 3282 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail 3283 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit 3284 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error 3285 */ 3286 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, 3287 bool from_vmentry) 3288 { 3289 struct vcpu_vmx *vmx = to_vmx(vcpu); 3290 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3291 enum vm_entry_failure_code entry_failure_code; 3292 bool evaluate_pending_interrupts; 3293 union vmx_exit_reason exit_reason = { 3294 .basic = EXIT_REASON_INVALID_STATE, 3295 .failed_vmentry = 1, 3296 }; 3297 u32 failed_index; 3298 3299 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 3300 kvm_vcpu_flush_tlb_current(vcpu); 3301 3302 evaluate_pending_interrupts = exec_controls_get(vmx) & 3303 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING); 3304 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) 3305 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); 3306 3307 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 3308 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 3309 if (kvm_mpx_supported() && 3310 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 3311 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3312 3313 /* 3314 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and* 3315 * nested early checks are disabled. In the event of a "late" VM-Fail, 3316 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its 3317 * software model to the pre-VMEntry host state. When EPT is disabled, 3318 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes 3319 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing 3320 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to 3321 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested 3322 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is 3323 * guaranteed to be overwritten with a shadow CR3 prior to re-entering 3324 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as 3325 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks 3326 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail 3327 * path would need to manually save/restore vmcs01.GUEST_CR3. 
3328 */ 3329 if (!enable_ept && !nested_early_check) 3330 vmcs_writel(GUEST_CR3, vcpu->arch.cr3); 3331 3332 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); 3333 3334 prepare_vmcs02_early(vmx, vmcs12); 3335 3336 if (from_vmentry) { 3337 if (unlikely(!nested_get_vmcs12_pages(vcpu))) { 3338 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3339 return NVMX_VMENTRY_KVM_INTERNAL_ERROR; 3340 } 3341 3342 if (nested_vmx_check_vmentry_hw(vcpu)) { 3343 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3344 return NVMX_VMENTRY_VMFAIL; 3345 } 3346 3347 if (nested_vmx_check_guest_state(vcpu, vmcs12, 3348 &entry_failure_code)) { 3349 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3350 vmcs12->exit_qualification = entry_failure_code; 3351 goto vmentry_fail_vmexit; 3352 } 3353 } 3354 3355 enter_guest_mode(vcpu); 3356 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 3357 vcpu->arch.tsc_offset += vmcs12->tsc_offset; 3358 3359 if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) { 3360 exit_reason.basic = EXIT_REASON_INVALID_STATE; 3361 vmcs12->exit_qualification = entry_failure_code; 3362 goto vmentry_fail_vmexit_guest_mode; 3363 } 3364 3365 if (from_vmentry) { 3366 failed_index = nested_vmx_load_msr(vcpu, 3367 vmcs12->vm_entry_msr_load_addr, 3368 vmcs12->vm_entry_msr_load_count); 3369 if (failed_index) { 3370 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL; 3371 vmcs12->exit_qualification = failed_index; 3372 goto vmentry_fail_vmexit_guest_mode; 3373 } 3374 } else { 3375 /* 3376 * The MMU is not initialized to point at the right entities yet and 3377 * "get pages" would need to read data from the guest (i.e. we will 3378 * need to perform gpa to hpa translation). Request a call 3379 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs 3380 * have already been set at vmentry time and should not be reset. 3381 */ 3382 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 3383 } 3384 3385 /* 3386 * If L1 had a pending IRQ/NMI until it executed 3387 * VMLAUNCH/VMRESUME which wasn't delivered because it was 3388 * disallowed (e.g. interrupts disabled), L0 needs to 3389 * evaluate if this pending event should cause an exit from L2 3390 * to L1 or delivered directly to L2 (e.g. In case L1 don't 3391 * intercept EXTERNAL_INTERRUPT). 3392 * 3393 * Usually this would be handled by the processor noticing an 3394 * IRQ/NMI window request, or checking RVI during evaluation of 3395 * pending virtual interrupts. However, this setting was done 3396 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0 3397 * to perform pending event evaluation by requesting a KVM_REQ_EVENT. 3398 */ 3399 if (unlikely(evaluate_pending_interrupts)) 3400 kvm_make_request(KVM_REQ_EVENT, vcpu); 3401 3402 /* 3403 * Do not start the preemption timer hrtimer until after we know 3404 * we are successful, so that only nested_vmx_vmexit needs to cancel 3405 * the timer. 3406 */ 3407 vmx->nested.preemption_timer_expired = false; 3408 if (nested_cpu_has_preemption_timer(vmcs12)) { 3409 u64 timer_value = vmx_calc_preemption_timer_value(vcpu); 3410 vmx_start_preemption_timer(vcpu, timer_value); 3411 } 3412 3413 /* 3414 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point 3415 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet 3416 * returned as far as L1 is concerned. It will only return (and set 3417 * the success flag) when L2 exits (see nested_vmx_vmexit()). 
3418 */ 3419 return NVMX_VMENTRY_SUCCESS; 3420 3421 /* 3422 * A failed consistency check that leads to a VMExit during L1's 3423 * VMEnter to L2 is a variation of a normal VMexit, as explained in 3424 * 26.7 "VM-entry failures during or after loading guest state". 3425 */ 3426 vmentry_fail_vmexit_guest_mode: 3427 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 3428 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3429 leave_guest_mode(vcpu); 3430 3431 vmentry_fail_vmexit: 3432 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3433 3434 if (!from_vmentry) 3435 return NVMX_VMENTRY_VMEXIT; 3436 3437 load_vmcs12_host_state(vcpu, vmcs12); 3438 vmcs12->vm_exit_reason = exit_reason.full; 3439 if (enable_shadow_vmcs || vmx->nested.hv_evmcs) 3440 vmx->nested.need_vmcs12_to_shadow_sync = true; 3441 return NVMX_VMENTRY_VMEXIT; 3442 } 3443 3444 /* 3445 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 3446 * for running an L2 nested guest. 3447 */ 3448 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) 3449 { 3450 struct vmcs12 *vmcs12; 3451 enum nvmx_vmentry_status status; 3452 struct vcpu_vmx *vmx = to_vmx(vcpu); 3453 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); 3454 enum nested_evmptrld_status evmptrld_status; 3455 3456 if (!nested_vmx_check_permission(vcpu)) 3457 return 1; 3458 3459 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch); 3460 if (evmptrld_status == EVMPTRLD_ERROR) { 3461 kvm_queue_exception(vcpu, UD_VECTOR); 3462 return 1; 3463 } else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) { 3464 return nested_vmx_failInvalid(vcpu); 3465 } 3466 3467 if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)) 3468 return nested_vmx_failInvalid(vcpu); 3469 3470 vmcs12 = get_vmcs12(vcpu); 3471 3472 /* 3473 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact 3474 * that there *is* a valid VMCS pointer, RFLAGS.CF is set 3475 * rather than RFLAGS.ZF, and no error number is stored to the 3476 * VM-instruction error field. 3477 */ 3478 if (CC(vmcs12->hdr.shadow_vmcs)) 3479 return nested_vmx_failInvalid(vcpu); 3480 3481 if (vmx->nested.hv_evmcs) { 3482 copy_enlightened_to_vmcs12(vmx); 3483 /* Enlightened VMCS doesn't have launch state */ 3484 vmcs12->launch_state = !launch; 3485 } else if (enable_shadow_vmcs) { 3486 copy_shadow_to_vmcs12(vmx); 3487 } 3488 3489 /* 3490 * The nested entry process starts with enforcing various prerequisites 3491 * on vmcs12 as required by the Intel SDM, and act appropriately when 3492 * they fail: As the SDM explains, some conditions should cause the 3493 * instruction to fail, while others will cause the instruction to seem 3494 * to succeed, but return an EXIT_REASON_INVALID_STATE. 3495 * To speed up the normal (success) code path, we should avoid checking 3496 * for misconfigurations which will anyway be caught by the processor 3497 * when using the merged vmcs02. 3498 */ 3499 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)) 3500 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); 3501 3502 if (CC(vmcs12->launch_state == launch)) 3503 return nested_vmx_fail(vcpu, 3504 launch ? 
VMXERR_VMLAUNCH_NONCLEAR_VMCS 3505 : VMXERR_VMRESUME_NONLAUNCHED_VMCS); 3506 3507 if (nested_vmx_check_controls(vcpu, vmcs12)) 3508 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3509 3510 if (nested_vmx_check_host_state(vcpu, vmcs12)) 3511 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3512 3513 /* 3514 * We're finally done with prerequisite checking, and can start with 3515 * the nested entry. 3516 */ 3517 vmx->nested.nested_run_pending = 1; 3518 vmx->nested.has_preemption_timer_deadline = false; 3519 status = nested_vmx_enter_non_root_mode(vcpu, true); 3520 if (unlikely(status != NVMX_VMENTRY_SUCCESS)) 3521 goto vmentry_failed; 3522 3523 /* Emulate processing of posted interrupts on VM-Enter. */ 3524 if (nested_cpu_has_posted_intr(vmcs12) && 3525 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { 3526 vmx->nested.pi_pending = true; 3527 kvm_make_request(KVM_REQ_EVENT, vcpu); 3528 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); 3529 } 3530 3531 /* Hide L1D cache contents from the nested guest. */ 3532 vmx->vcpu.arch.l1tf_flush_l1d = true; 3533 3534 /* 3535 * Must happen outside of nested_vmx_enter_non_root_mode() as it will 3536 * also be used as part of restoring nVMX state for 3537 * snapshot restore (migration). 3538 * 3539 * In this flow, it is assumed that the vmcs12 cache was 3540 * transferred as part of the captured nVMX state and should 3541 * therefore not be read from guest memory (which may not 3542 * exist on the destination host yet). 3543 */ 3544 nested_cache_shadow_vmcs12(vcpu, vmcs12); 3545 3546 switch (vmcs12->guest_activity_state) { 3547 case GUEST_ACTIVITY_HLT: 3548 /* 3549 * If we're entering a halted L2 vcpu and the L2 vcpu won't be 3550 * awakened by event injection or by an NMI-window VM-exit or 3551 * by an interrupt-window VM-exit, halt the vcpu. 3552 */ 3553 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && 3554 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) && 3555 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) && 3556 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { 3557 vmx->nested.nested_run_pending = 0; 3558 return kvm_vcpu_halt(vcpu); 3559 } 3560 break; 3561 case GUEST_ACTIVITY_WAIT_SIPI: 3562 vmx->nested.nested_run_pending = 0; 3563 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 3564 break; 3565 default: 3566 break; 3567 } 3568 3569 return 1; 3570 3571 vmentry_failed: 3572 vmx->nested.nested_run_pending = 0; 3573 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR) 3574 return 0; 3575 if (status == NVMX_VMENTRY_VMEXIT) 3576 return 1; 3577 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL); 3578 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3579 } 3580 3581 /* 3582 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date 3583 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). 3584 * This function returns the new value we should put in vmcs12.guest_cr0. 3585 * It's not enough to just return the vmcs02 GUEST_CR0. Rather, 3586 * 1. Bits that neither L0 nor L1 trapped were set directly by L2 and are now 3587 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 3588 * didn't trap the bit, because if L1 did, so would L0). 3589 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have 3590 * been modified by L2, and L1 knows it. So just leave the old value of 3591 * the bit from vmcs12.guest_cr0. 
Note that the bit from vmcs02 GUEST_CR0 3592 * isn't relevant, because if L0 traps this bit it can set it to anything. 3593 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have 3594 * changed these bits, and therefore they need to be updated, but L0 3595 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather 3596 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. 3597 */ 3598 static inline unsigned long 3599 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3600 { 3601 return 3602 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | 3603 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | 3604 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | 3605 vcpu->arch.cr0_guest_owned_bits)); 3606 } 3607 3608 static inline unsigned long 3609 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3610 { 3611 return 3612 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | 3613 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | 3614 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | 3615 vcpu->arch.cr4_guest_owned_bits)); 3616 } 3617 3618 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, 3619 struct vmcs12 *vmcs12) 3620 { 3621 u32 idt_vectoring; 3622 unsigned int nr; 3623 3624 if (vcpu->arch.exception.injected) { 3625 nr = vcpu->arch.exception.nr; 3626 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3627 3628 if (kvm_exception_is_soft(nr)) { 3629 vmcs12->vm_exit_instruction_len = 3630 vcpu->arch.event_exit_inst_len; 3631 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; 3632 } else 3633 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; 3634 3635 if (vcpu->arch.exception.has_error_code) { 3636 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; 3637 vmcs12->idt_vectoring_error_code = 3638 vcpu->arch.exception.error_code; 3639 } 3640 3641 vmcs12->idt_vectoring_info_field = idt_vectoring; 3642 } else if (vcpu->arch.nmi_injected) { 3643 vmcs12->idt_vectoring_info_field = 3644 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; 3645 } else if (vcpu->arch.interrupt.injected) { 3646 nr = vcpu->arch.interrupt.nr; 3647 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3648 3649 if (vcpu->arch.interrupt.soft) { 3650 idt_vectoring |= INTR_TYPE_SOFT_INTR; 3651 vmcs12->vm_entry_instruction_len = 3652 vcpu->arch.event_exit_inst_len; 3653 } else 3654 idt_vectoring |= INTR_TYPE_EXT_INTR; 3655 3656 vmcs12->idt_vectoring_info_field = idt_vectoring; 3657 } 3658 } 3659 3660 3661 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) 3662 { 3663 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3664 gfn_t gfn; 3665 3666 /* 3667 * Don't need to mark the APIC access page dirty; it is never 3668 * written to by the CPU during APIC virtualization. 
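 * The virtual-APIC page and the posted-interrupt descriptor, by contrast,
 * are written by the CPU while L2 runs, so they are marked dirty below.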
3669 */ 3670 3671 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3672 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; 3673 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3674 } 3675 3676 if (nested_cpu_has_posted_intr(vmcs12)) { 3677 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; 3678 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3679 } 3680 } 3681 3682 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 3683 { 3684 struct vcpu_vmx *vmx = to_vmx(vcpu); 3685 int max_irr; 3686 void *vapic_page; 3687 u16 status; 3688 3689 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) 3690 return; 3691 3692 vmx->nested.pi_pending = false; 3693 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 3694 return; 3695 3696 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); 3697 if (max_irr != 256) { 3698 vapic_page = vmx->nested.virtual_apic_map.hva; 3699 if (!vapic_page) 3700 return; 3701 3702 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, 3703 vapic_page, &max_irr); 3704 status = vmcs_read16(GUEST_INTR_STATUS); 3705 if ((u8)max_irr > ((u8)status & 0xff)) { 3706 status &= ~0xff; 3707 status |= (u8)max_irr; 3708 vmcs_write16(GUEST_INTR_STATUS, status); 3709 } 3710 } 3711 3712 nested_mark_vmcs12_pages_dirty(vcpu); 3713 } 3714 3715 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, 3716 unsigned long exit_qual) 3717 { 3718 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3719 unsigned int nr = vcpu->arch.exception.nr; 3720 u32 intr_info = nr | INTR_INFO_VALID_MASK; 3721 3722 if (vcpu->arch.exception.has_error_code) { 3723 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; 3724 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 3725 } 3726 3727 if (kvm_exception_is_soft(nr)) 3728 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 3729 else 3730 intr_info |= INTR_TYPE_HARD_EXCEPTION; 3731 3732 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && 3733 vmx_get_nmi_mask(vcpu)) 3734 intr_info |= INTR_INFO_UNBLOCK_NMI; 3735 3736 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); 3737 } 3738 3739 /* 3740 * Returns true if a debug trap is pending delivery. 3741 * 3742 * In KVM, debug traps bear an exception payload. As such, the class of a #DB 3743 * exception may be inferred from the presence of an exception payload. 3744 */ 3745 static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu) 3746 { 3747 return vcpu->arch.exception.pending && 3748 vcpu->arch.exception.nr == DB_VECTOR && 3749 vcpu->arch.exception.payload; 3750 } 3751 3752 /* 3753 * Certain VM-exits set the 'pending debug exceptions' field to indicate a 3754 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM 3755 * represents these debug traps with a payload that is said to be compatible 3756 * with the 'pending debug exceptions' field, write the payload to the VMCS 3757 * field if a VM-exit is delivered before the debug trap. 
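 * (vmx_check_nested_events() below does exactly that for the INIT and MTF
 * VM-exits, both of which are delivered ahead of a pending debug trap.)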
3758 */ 3759 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu) 3760 { 3761 if (vmx_pending_dbg_trap(vcpu)) 3762 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 3763 vcpu->arch.exception.payload); 3764 } 3765 3766 static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu) 3767 { 3768 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && 3769 to_vmx(vcpu)->nested.preemption_timer_expired; 3770 } 3771 3772 static int vmx_check_nested_events(struct kvm_vcpu *vcpu) 3773 { 3774 struct vcpu_vmx *vmx = to_vmx(vcpu); 3775 unsigned long exit_qual; 3776 bool block_nested_events = 3777 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); 3778 bool mtf_pending = vmx->nested.mtf_pending; 3779 struct kvm_lapic *apic = vcpu->arch.apic; 3780 3781 /* 3782 * Clear the MTF state. If a higher priority VM-exit is delivered first, 3783 * this state is discarded. 3784 */ 3785 if (!block_nested_events) 3786 vmx->nested.mtf_pending = false; 3787 3788 if (lapic_in_kernel(vcpu) && 3789 test_bit(KVM_APIC_INIT, &apic->pending_events)) { 3790 if (block_nested_events) 3791 return -EBUSY; 3792 nested_vmx_update_pending_dbg(vcpu); 3793 clear_bit(KVM_APIC_INIT, &apic->pending_events); 3794 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED) 3795 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); 3796 return 0; 3797 } 3798 3799 if (lapic_in_kernel(vcpu) && 3800 test_bit(KVM_APIC_SIPI, &apic->pending_events)) { 3801 if (block_nested_events) 3802 return -EBUSY; 3803 3804 clear_bit(KVM_APIC_SIPI, &apic->pending_events); 3805 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 3806 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0, 3807 apic->sipi_vector & 0xFFUL); 3808 return 0; 3809 } 3810 3811 /* 3812 * Process any exceptions that are not debug traps before MTF. 3813 */ 3814 if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) { 3815 if (block_nested_events) 3816 return -EBUSY; 3817 if (!nested_vmx_check_exception(vcpu, &exit_qual)) 3818 goto no_vmexit; 3819 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3820 return 0; 3821 } 3822 3823 if (mtf_pending) { 3824 if (block_nested_events) 3825 return -EBUSY; 3826 nested_vmx_update_pending_dbg(vcpu); 3827 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); 3828 return 0; 3829 } 3830 3831 if (vcpu->arch.exception.pending) { 3832 if (block_nested_events) 3833 return -EBUSY; 3834 if (!nested_vmx_check_exception(vcpu, &exit_qual)) 3835 goto no_vmexit; 3836 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3837 return 0; 3838 } 3839 3840 if (nested_vmx_preemption_timer_pending(vcpu)) { 3841 if (block_nested_events) 3842 return -EBUSY; 3843 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); 3844 return 0; 3845 } 3846 3847 if (vcpu->arch.smi_pending && !is_smm(vcpu)) { 3848 if (block_nested_events) 3849 return -EBUSY; 3850 goto no_vmexit; 3851 } 3852 3853 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) { 3854 if (block_nested_events) 3855 return -EBUSY; 3856 if (!nested_exit_on_nmi(vcpu)) 3857 goto no_vmexit; 3858 3859 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 3860 NMI_VECTOR | INTR_TYPE_NMI_INTR | 3861 INTR_INFO_VALID_MASK, 0); 3862 /* 3863 * The NMI-triggered VM exit counts as injection: 3864 * clear this one and block further NMIs. 
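 * This mirrors bare metal, where delivering an NMI blocks further NMIs
 * until the handler executes IRET.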
3865 */ 3866 vcpu->arch.nmi_pending = 0; 3867 vmx_set_nmi_mask(vcpu, true); 3868 return 0; 3869 } 3870 3871 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) { 3872 if (block_nested_events) 3873 return -EBUSY; 3874 if (!nested_exit_on_intr(vcpu)) 3875 goto no_vmexit; 3876 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); 3877 return 0; 3878 } 3879 3880 no_vmexit: 3881 vmx_complete_nested_posted_interrupt(vcpu); 3882 return 0; 3883 } 3884 3885 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 3886 { 3887 ktime_t remaining = 3888 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); 3889 u64 value; 3890 3891 if (ktime_to_ns(remaining) <= 0) 3892 return 0; 3893 3894 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; 3895 do_div(value, 1000000); 3896 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 3897 } 3898 3899 static bool is_vmcs12_ext_field(unsigned long field) 3900 { 3901 switch (field) { 3902 case GUEST_ES_SELECTOR: 3903 case GUEST_CS_SELECTOR: 3904 case GUEST_SS_SELECTOR: 3905 case GUEST_DS_SELECTOR: 3906 case GUEST_FS_SELECTOR: 3907 case GUEST_GS_SELECTOR: 3908 case GUEST_LDTR_SELECTOR: 3909 case GUEST_TR_SELECTOR: 3910 case GUEST_ES_LIMIT: 3911 case GUEST_CS_LIMIT: 3912 case GUEST_SS_LIMIT: 3913 case GUEST_DS_LIMIT: 3914 case GUEST_FS_LIMIT: 3915 case GUEST_GS_LIMIT: 3916 case GUEST_LDTR_LIMIT: 3917 case GUEST_TR_LIMIT: 3918 case GUEST_GDTR_LIMIT: 3919 case GUEST_IDTR_LIMIT: 3920 case GUEST_ES_AR_BYTES: 3921 case GUEST_DS_AR_BYTES: 3922 case GUEST_FS_AR_BYTES: 3923 case GUEST_GS_AR_BYTES: 3924 case GUEST_LDTR_AR_BYTES: 3925 case GUEST_TR_AR_BYTES: 3926 case GUEST_ES_BASE: 3927 case GUEST_CS_BASE: 3928 case GUEST_SS_BASE: 3929 case GUEST_DS_BASE: 3930 case GUEST_FS_BASE: 3931 case GUEST_GS_BASE: 3932 case GUEST_LDTR_BASE: 3933 case GUEST_TR_BASE: 3934 case GUEST_GDTR_BASE: 3935 case GUEST_IDTR_BASE: 3936 case GUEST_PENDING_DBG_EXCEPTIONS: 3937 case GUEST_BNDCFGS: 3938 return true; 3939 default: 3940 break; 3941 } 3942 3943 return false; 3944 } 3945 3946 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 3947 struct vmcs12 *vmcs12) 3948 { 3949 struct vcpu_vmx *vmx = to_vmx(vcpu); 3950 3951 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); 3952 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); 3953 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); 3954 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); 3955 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); 3956 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); 3957 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); 3958 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); 3959 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); 3960 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); 3961 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); 3962 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); 3963 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); 3964 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); 3965 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); 3966 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); 3967 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); 3968 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); 3969 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); 3970 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); 3971 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); 3972 
vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); 3973 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); 3974 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); 3975 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); 3976 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); 3977 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); 3978 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); 3979 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); 3980 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); 3981 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); 3982 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); 3983 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); 3984 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); 3985 vmcs12->guest_pending_dbg_exceptions = 3986 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); 3987 if (kvm_mpx_supported()) 3988 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3989 3990 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; 3991 } 3992 3993 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 3994 struct vmcs12 *vmcs12) 3995 { 3996 struct vcpu_vmx *vmx = to_vmx(vcpu); 3997 int cpu; 3998 3999 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) 4000 return; 4001 4002 4003 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); 4004 4005 cpu = get_cpu(); 4006 vmx->loaded_vmcs = &vmx->nested.vmcs02; 4007 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01); 4008 4009 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4010 4011 vmx->loaded_vmcs = &vmx->vmcs01; 4012 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); 4013 put_cpu(); 4014 } 4015 4016 /* 4017 * Update the guest state fields of vmcs12 to reflect changes that 4018 * occurred while L2 was running. (The "IA-32e mode guest" bit of the 4019 * VM-entry controls is also updated, since this is really a guest 4020 * state bit.) 4021 */ 4022 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 4023 { 4024 struct vcpu_vmx *vmx = to_vmx(vcpu); 4025 4026 if (vmx->nested.hv_evmcs) 4027 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4028 4029 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs; 4030 4031 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 4032 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); 4033 4034 vmcs12->guest_rsp = kvm_rsp_read(vcpu); 4035 vmcs12->guest_rip = kvm_rip_read(vcpu); 4036 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); 4037 4038 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); 4039 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); 4040 4041 vmcs12->guest_interruptibility_info = 4042 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 4043 4044 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 4045 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; 4046 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 4047 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI; 4048 else 4049 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; 4050 4051 if (nested_cpu_has_preemption_timer(vmcs12) && 4052 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER && 4053 !vmx->nested.nested_run_pending) 4054 vmcs12->vmx_preemption_timer_value = 4055 vmx_get_preemption_timer_value(vcpu); 4056 4057 /* 4058 * In some cases (usually, nested EPT), L2 is allowed to change its 4059 * own CR3 without exiting. If it has changed it, we must keep it. 4060 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined 4061 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. 
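 * (Hence the enable_ept guard below: with shadow paging, vmcs02.GUEST_CR3
 * holds a KVM-owned shadow root and must not leak into vmcs12.)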
4062 * 4063 * Additionally, restore L2's PDPTRs to vmcs12. 4064 */ 4065 if (enable_ept) { 4066 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); 4067 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { 4068 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); 4069 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); 4070 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); 4071 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); 4072 } 4073 } 4074 4075 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); 4076 4077 if (nested_cpu_has_vid(vmcs12)) 4078 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); 4079 4080 vmcs12->vm_entry_controls = 4081 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | 4082 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); 4083 4084 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) 4085 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); 4086 4087 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) 4088 vmcs12->guest_ia32_efer = vcpu->arch.efer; 4089 } 4090 4091 /* 4092 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits 4093 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), 4094 * and this function updates it to reflect the changes to the guest state while 4095 * L2 was running (and perhaps made some exits which were handled directly by L0 4096 * without going back to L1), and to reflect the exit reason. 4097 * Note that we do not have to copy here all VMCS fields, just those that 4098 * could have been changed by the L2 guest or the exit - i.e., the guest-state 4099 * and exit-information fields only. Other fields are modified by L1 with 4100 * VMWRITE, which already writes to vmcs12 directly. 4101 */ 4102 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 4103 u32 vm_exit_reason, u32 exit_intr_info, 4104 unsigned long exit_qualification) 4105 { 4106 /* update exit information fields: */ 4107 vmcs12->vm_exit_reason = vm_exit_reason; 4108 vmcs12->exit_qualification = exit_qualification; 4109 vmcs12->vm_exit_intr_info = exit_intr_info; 4110 4111 vmcs12->idt_vectoring_info_field = 0; 4112 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 4113 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4114 4115 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { 4116 vmcs12->launch_state = 1; 4117 4118 /* vm_entry_intr_info_field is cleared on exit. Emulate this 4119 * instead of reading the real value. */ 4120 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; 4121 4122 /* 4123 * Transfer the event that L0 or L1 may have wanted to inject 4124 * into L2 to IDT_VECTORING_INFO_FIELD. 4125 */ 4126 vmcs12_save_pending_event(vcpu, vmcs12); 4127 4128 /* 4129 * According to the spec, there's no need to store the guest's 4130 * MSRs if the exit is due to a VM-entry failure that occurs 4131 * during or after loading the guest state. Since this exit 4132 * does not fall in that category, we need to save the MSRs. 4133 */ 4134 if (nested_vmx_store_msr(vcpu, 4135 vmcs12->vm_exit_msr_store_addr, 4136 vmcs12->vm_exit_msr_store_count)) 4137 nested_vmx_abort(vcpu, 4138 VMX_ABORT_SAVE_GUEST_MSR_FAIL); 4139 } 4140 4141 /* 4142 * Drop what we picked up for L2 via vmx_complete_interrupts. It is 4143 * preserved above and would only end up incorrectly in L1. 
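 * ("preserved above" refers to vmcs12_save_pending_event(), which has
 * already recorded any pending event in vmcs12->idt_vectoring_info_field.)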
4144 */ 4145 vcpu->arch.nmi_injected = false; 4146 kvm_clear_exception_queue(vcpu); 4147 kvm_clear_interrupt_queue(vcpu); 4148 } 4149 4150 /* 4151 * A part of what we need to do when the nested L2 guest exits and we want to 4152 * run its L1 parent is to reset L1's guest state to the host state specified 4153 * in vmcs12. 4154 * This function is to be called not only on a normal nested exit, but also on 4155 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry 4156 * Failures During or After Loading Guest State"). 4157 * This function should be called when the active VMCS is L1's (vmcs01). 4158 */ 4159 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 4160 struct vmcs12 *vmcs12) 4161 { 4162 enum vm_entry_failure_code ignored; 4163 struct kvm_segment seg; 4164 4165 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) 4166 vcpu->arch.efer = vmcs12->host_ia32_efer; 4167 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4168 vcpu->arch.efer |= (EFER_LMA | EFER_LME); 4169 else 4170 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); 4171 vmx_set_efer(vcpu, vcpu->arch.efer); 4172 4173 kvm_rsp_write(vcpu, vmcs12->host_rsp); 4174 kvm_rip_write(vcpu, vmcs12->host_rip); 4175 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); 4176 vmx_set_interrupt_shadow(vcpu, 0); 4177 4178 /* 4179 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't 4180 * actually changed, because vmx_set_cr0 refers to efer set above. 4181 * 4182 * CR0_GUEST_HOST_MASK is already set in the original vmcs01 4183 * (KVM doesn't change it). 4184 */ 4185 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4186 vmx_set_cr0(vcpu, vmcs12->host_cr0); 4187 4188 /* Same as above - no reason to call set_cr4_guest_host_mask(). */ 4189 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4190 vmx_set_cr4(vcpu, vmcs12->host_cr4); 4191 4192 nested_ept_uninit_mmu_context(vcpu); 4193 4194 /* 4195 * Only PDPTE load can fail as the value of cr3 was checked on entry and 4196 * couldn't have changed. 4197 */ 4198 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ignored)) 4199 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); 4200 4201 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false); 4202 4203 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); 4204 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); 4205 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); 4206 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); 4207 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); 4208 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); 4209 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); 4210 4211 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. 
*/ 4212 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) 4213 vmcs_write64(GUEST_BNDCFGS, 0); 4214 4215 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { 4216 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); 4217 vcpu->arch.pat = vmcs12->host_ia32_pat; 4218 } 4219 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 4220 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 4221 vmcs12->host_ia32_perf_global_ctrl)); 4222 4223 /* Set L1 segment info according to Intel SDM 4224 27.5.2 Loading Host Segment and Descriptor-Table Registers */ 4225 seg = (struct kvm_segment) { 4226 .base = 0, 4227 .limit = 0xFFFFFFFF, 4228 .selector = vmcs12->host_cs_selector, 4229 .type = 11, 4230 .present = 1, 4231 .s = 1, 4232 .g = 1 4233 }; 4234 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4235 seg.l = 1; 4236 else 4237 seg.db = 1; 4238 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); 4239 seg = (struct kvm_segment) { 4240 .base = 0, 4241 .limit = 0xFFFFFFFF, 4242 .type = 3, 4243 .present = 1, 4244 .s = 1, 4245 .db = 1, 4246 .g = 1 4247 }; 4248 seg.selector = vmcs12->host_ds_selector; 4249 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); 4250 seg.selector = vmcs12->host_es_selector; 4251 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); 4252 seg.selector = vmcs12->host_ss_selector; 4253 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); 4254 seg.selector = vmcs12->host_fs_selector; 4255 seg.base = vmcs12->host_fs_base; 4256 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); 4257 seg.selector = vmcs12->host_gs_selector; 4258 seg.base = vmcs12->host_gs_base; 4259 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); 4260 seg = (struct kvm_segment) { 4261 .base = vmcs12->host_tr_base, 4262 .limit = 0x67, 4263 .selector = vmcs12->host_tr_selector, 4264 .type = 11, 4265 .present = 1 4266 }; 4267 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); 4268 4269 kvm_set_dr(vcpu, 7, 0x400); 4270 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4271 4272 if (cpu_has_vmx_msr_bitmap()) 4273 vmx_update_msr_bitmap(vcpu); 4274 4275 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 4276 vmcs12->vm_exit_msr_load_count)) 4277 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4278 } 4279 4280 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) 4281 { 4282 struct vmx_uret_msr *efer_msr; 4283 unsigned int i; 4284 4285 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) 4286 return vmcs_read64(GUEST_IA32_EFER); 4287 4288 if (cpu_has_load_ia32_efer()) 4289 return host_efer; 4290 4291 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { 4292 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) 4293 return vmx->msr_autoload.guest.val[i].value; 4294 } 4295 4296 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER); 4297 if (efer_msr) 4298 return efer_msr->data; 4299 4300 return host_efer; 4301 } 4302 4303 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) 4304 { 4305 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4306 struct vcpu_vmx *vmx = to_vmx(vcpu); 4307 struct vmx_msr_entry g, h; 4308 gpa_t gpa; 4309 u32 i, j; 4310 4311 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); 4312 4313 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 4314 /* 4315 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set 4316 * as vmcs01.GUEST_DR7 contains a userspace defined value 4317 * and vcpu->arch.dr7 is not squirreled away before the 4318 * nested VMENTER (not worth adding a variable in nested_vmx). 
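 * Falling back to DR7_FIXED_1 (the architectural reset value, 0x400) is
 * the best KVM can do in that case.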
4319 */ 4320 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 4321 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 4322 else 4323 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); 4324 } 4325 4326 /* 4327 * Note that calling vmx_set_{efer,cr0,cr4} is important as they 4328 * handle a variety of side effects to KVM's software model. 4329 */ 4330 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); 4331 4332 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; 4333 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); 4334 4335 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4336 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); 4337 4338 nested_ept_uninit_mmu_context(vcpu); 4339 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 4340 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 4341 4342 /* 4343 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs 4344 * from vmcs01 (if necessary). The PDPTRs are not loaded on 4345 * VMFail, like everything else we just need to ensure our 4346 * software model is up-to-date. 4347 */ 4348 if (enable_ept && is_pae_paging(vcpu)) 4349 ept_save_pdptrs(vcpu); 4350 4351 kvm_mmu_reset_context(vcpu); 4352 4353 if (cpu_has_vmx_msr_bitmap()) 4354 vmx_update_msr_bitmap(vcpu); 4355 4356 /* 4357 * This nasty bit of open coding is a compromise between blindly 4358 * loading L1's MSRs using the exit load lists (incorrect emulation 4359 * of VMFail), leaving the nested VM's MSRs in the software model 4360 * (incorrect behavior) and snapshotting the modified MSRs (too 4361 * expensive since the lists are unbound by hardware). For each 4362 * MSR that was (prematurely) loaded from the nested VMEntry load 4363 * list, reload it from the exit load list if it exists and differs 4364 * from the guest value. The intent is to stuff host state as 4365 * silently as possible, not to fully process the exit load list. 4366 */ 4367 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { 4368 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); 4369 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { 4370 pr_debug_ratelimited( 4371 "%s read MSR index failed (%u, 0x%08llx)\n", 4372 __func__, i, gpa); 4373 goto vmabort; 4374 } 4375 4376 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { 4377 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); 4378 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { 4379 pr_debug_ratelimited( 4380 "%s read MSR failed (%u, 0x%08llx)\n", 4381 __func__, j, gpa); 4382 goto vmabort; 4383 } 4384 if (h.index != g.index) 4385 continue; 4386 if (h.value == g.value) 4387 break; 4388 4389 if (nested_vmx_load_msr_check(vcpu, &h)) { 4390 pr_debug_ratelimited( 4391 "%s check failed (%u, 0x%x, 0x%x)\n", 4392 __func__, j, h.index, h.reserved); 4393 goto vmabort; 4394 } 4395 4396 if (kvm_set_msr(vcpu, h.index, h.value)) { 4397 pr_debug_ratelimited( 4398 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", 4399 __func__, j, h.index, h.value); 4400 goto vmabort; 4401 } 4402 } 4403 } 4404 4405 return; 4406 4407 vmabort: 4408 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4409 } 4410 4411 /* 4412 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 4413 * and modify vmcs12 to make it see what it would expect to see there if 4414 * L2 was its real guest. 
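 * A vm_exit_reason of -1 means KVM is leaving guest mode without emulating
 * an architectural VM-exit, so no exit information is recorded in vmcs12
 * (note the vm_exit_reason != -1 checks below).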
Must only be called when in L2 (is_guest_mode()) 4415 */ 4416 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, 4417 u32 exit_intr_info, unsigned long exit_qualification) 4418 { 4419 struct vcpu_vmx *vmx = to_vmx(vcpu); 4420 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4421 4422 /* trying to cancel vmlaunch/vmresume is a bug */ 4423 WARN_ON_ONCE(vmx->nested.nested_run_pending); 4424 4425 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 4426 4427 /* Service the TLB flush request for L2 before switching to L1. */ 4428 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 4429 kvm_vcpu_flush_tlb_current(vcpu); 4430 4431 /* 4432 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between 4433 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are 4434 * up-to-date before switching to L1. 4435 */ 4436 if (enable_ept && is_pae_paging(vcpu)) 4437 vmx_ept_load_pdptrs(vcpu); 4438 4439 leave_guest_mode(vcpu); 4440 4441 if (nested_cpu_has_preemption_timer(vmcs12)) 4442 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); 4443 4444 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 4445 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 4446 4447 if (likely(!vmx->fail)) { 4448 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 4449 4450 if (vm_exit_reason != -1) 4451 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason, 4452 exit_intr_info, exit_qualification); 4453 4454 /* 4455 * Must happen outside of sync_vmcs02_to_vmcs12() as it will 4456 * also be used to capture vmcs12 cache as part of 4457 * capturing nVMX state for snapshot (migration). 4458 * 4459 * Otherwise, this flush will dirty guest memory at a 4460 * point it is already assumed by user-space to be 4461 * immutable. 4462 */ 4463 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); 4464 } else { 4465 /* 4466 * The only expected VM-instruction error is "VM entry with 4467 * invalid control field(s)." Anything else indicates a 4468 * problem with L0. And we should never get here with a 4469 * VMFail of any type if early consistency checks are enabled. 
4470 */ 4471 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 4472 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4473 WARN_ON_ONCE(nested_early_check); 4474 } 4475 4476 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 4477 4478 /* Update any VMCS fields that might have changed while L2 ran */ 4479 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 4480 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 4481 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 4482 if (vmx->nested.l1_tpr_threshold != -1) 4483 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); 4484 4485 if (kvm_has_tsc_control) 4486 decache_tsc_multiplier(vmx); 4487 4488 if (vmx->nested.change_vmcs01_virtual_apic_mode) { 4489 vmx->nested.change_vmcs01_virtual_apic_mode = false; 4490 vmx_set_virtual_apic_mode(vcpu); 4491 } 4492 4493 if (vmx->nested.update_vmcs01_cpu_dirty_logging) { 4494 vmx->nested.update_vmcs01_cpu_dirty_logging = false; 4495 vmx_update_cpu_dirty_logging(vcpu); 4496 } 4497 4498 /* Unpin physical memory we referred to in vmcs02 */ 4499 if (vmx->nested.apic_access_page) { 4500 kvm_release_page_clean(vmx->nested.apic_access_page); 4501 vmx->nested.apic_access_page = NULL; 4502 } 4503 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); 4504 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); 4505 vmx->nested.pi_desc = NULL; 4506 4507 if (vmx->nested.reload_vmcs01_apic_access_page) { 4508 vmx->nested.reload_vmcs01_apic_access_page = false; 4509 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 4510 } 4511 4512 if ((vm_exit_reason != -1) && 4513 (enable_shadow_vmcs || vmx->nested.hv_evmcs)) 4514 vmx->nested.need_vmcs12_to_shadow_sync = true; 4515 4516 /* in case we halted in L2 */ 4517 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 4518 4519 if (likely(!vmx->fail)) { 4520 if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && 4521 nested_exit_intr_ack_set(vcpu)) { 4522 int irq = kvm_cpu_get_interrupt(vcpu); 4523 WARN_ON(irq < 0); 4524 vmcs12->vm_exit_intr_info = irq | 4525 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; 4526 } 4527 4528 if (vm_exit_reason != -1) 4529 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, 4530 vmcs12->exit_qualification, 4531 vmcs12->idt_vectoring_info_field, 4532 vmcs12->vm_exit_intr_info, 4533 vmcs12->vm_exit_intr_error_code, 4534 KVM_ISA_VMX); 4535 4536 load_vmcs12_host_state(vcpu, vmcs12); 4537 4538 return; 4539 } 4540 4541 /* 4542 * After an early L2 VM-entry failure, we're now back 4543 * in L1 which thinks it just finished a VMLAUNCH or 4544 * VMRESUME instruction, so we need to set the failure 4545 * flag and the VM-instruction error field of the VMCS 4546 * accordingly, and skip the emulated instruction. 4547 */ 4548 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4549 4550 /* 4551 * Restore L1's host state to KVM's software model. We're here 4552 * because a consistency check was caught by hardware, which 4553 * means some amount of guest state has been propagated to KVM's 4554 * model and needs to be unwound to the host's state. 4555 */ 4556 nested_vmx_restore_host_state(vcpu); 4557 4558 vmx->fail = 0; 4559 } 4560 4561 /* 4562 * Decode the memory-address operand of a vmx instruction, as recorded on an 4563 * exit caused by such an instruction (run by a guest hypervisor). 4564 * On success, returns 0. When the operand is invalid, returns 1 and throws 4565 * #UD, #GP, or #SS. 
4566 */ 4567 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, 4568 u32 vmx_instruction_info, bool wr, int len, gva_t *ret) 4569 { 4570 gva_t off; 4571 bool exn; 4572 struct kvm_segment s; 4573 4574 /* 4575 * According to Vol. 3B, "Information for VM Exits Due to Instruction 4576 * Execution", on an exit, vmx_instruction_info holds most of the 4577 * addressing components of the operand. Only the displacement part 4578 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). 4579 * For how an actual address is calculated from all these components, 4580 * refer to Vol. 1, "Operand Addressing". 4581 */ 4582 int scaling = vmx_instruction_info & 3; 4583 int addr_size = (vmx_instruction_info >> 7) & 7; 4584 bool is_reg = vmx_instruction_info & (1u << 10); 4585 int seg_reg = (vmx_instruction_info >> 15) & 7; 4586 int index_reg = (vmx_instruction_info >> 18) & 0xf; 4587 bool index_is_valid = !(vmx_instruction_info & (1u << 22)); 4588 int base_reg = (vmx_instruction_info >> 23) & 0xf; 4589 bool base_is_valid = !(vmx_instruction_info & (1u << 27)); 4590 4591 if (is_reg) { 4592 kvm_queue_exception(vcpu, UD_VECTOR); 4593 return 1; 4594 } 4595 4596 /* Addr = segment_base + offset */ 4597 /* offset = base + [index * scale] + displacement */ 4598 off = exit_qualification; /* holds the displacement */ 4599 if (addr_size == 1) 4600 off = (gva_t)sign_extend64(off, 31); 4601 else if (addr_size == 0) 4602 off = (gva_t)sign_extend64(off, 15); 4603 if (base_is_valid) 4604 off += kvm_register_read(vcpu, base_reg); 4605 if (index_is_valid) 4606 off += kvm_register_read(vcpu, index_reg) << scaling; 4607 vmx_get_segment(vcpu, &s, seg_reg); 4608 4609 /* 4610 * The effective address, i.e. @off, of a memory operand is truncated 4611 * based on the address size of the instruction. Note that this is 4612 * the *effective address*, i.e. the address prior to accounting for 4613 * the segment's base. 4614 */ 4615 if (addr_size == 1) /* 32 bit */ 4616 off &= 0xffffffff; 4617 else if (addr_size == 0) /* 16 bit */ 4618 off &= 0xffff; 4619 4620 /* Checks for #GP/#SS exceptions. */ 4621 exn = false; 4622 if (is_long_mode(vcpu)) { 4623 /* 4624 * The virtual/linear address is never truncated in 64-bit 4625 * mode, e.g. a 32-bit address size can yield a 64-bit virtual 4626 * address when using FS/GS with a non-zero base. 4627 */ 4628 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS) 4629 *ret = s.base + off; 4630 else 4631 *ret = off; 4632 4633 /* Long mode: #GP(0)/#SS(0) if the memory address is in a 4634 * non-canonical form. This is the only check on the memory 4635 * destination for long mode! 4636 */ 4637 exn = is_noncanonical_address(*ret, vcpu); 4638 } else { 4639 /* 4640 * When not in long mode, the virtual/linear address is 4641 * unconditionally truncated to 32 bits regardless of the 4642 * address size. 4643 */ 4644 *ret = (s.base + off) & 0xffffffff; 4645 4646 /* Protected mode: apply checks for segment validity in the 4647 * following order: 4648 * - segment type check (#GP(0) may be thrown) 4649 * - usability check (#GP(0)/#SS(0)) 4650 * - limit check (#GP(0)/#SS(0)) 4651 */ 4652 if (wr) 4653 /* #GP(0) if the destination operand is located in a 4654 * read-only data segment or any code segment. 
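 * (Segment type bit 3 distinguishes code from data; for data segments,
 * bit 1 is the writable bit, so (type & 0xa) == 0 is a read-only data
 * segment and (type & 8) is any code segment.)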
4655 */ 4656 exn = ((s.type & 0xa) == 0 || (s.type & 8)); 4657 else 4658 /* #GP(0) if the source operand is located in an 4659 * execute-only code segment 4660 */ 4661 exn = ((s.type & 0xa) == 8); 4662 if (exn) { 4663 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 4664 return 1; 4665 } 4666 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 4667 */ 4668 exn = (s.unusable != 0); 4669 4670 /* 4671 * Protected mode: #GP(0)/#SS(0) if the memory operand is 4672 * outside the segment limit. All CPUs that support VMX ignore 4673 * limit checks for flat segments, i.e. segments with base==0, 4674 * limit==0xffffffff and of type expand-up data or code. 4675 */ 4676 if (!(s.base == 0 && s.limit == 0xffffffff && 4677 ((s.type & 8) || !(s.type & 4)))) 4678 exn = exn || ((u64)off + len - 1 > s.limit); 4679 } 4680 if (exn) { 4681 kvm_queue_exception_e(vcpu, 4682 seg_reg == VCPU_SREG_SS ? 4683 SS_VECTOR : GP_VECTOR, 4684 0); 4685 return 1; 4686 } 4687 4688 return 0; 4689 } 4690 4691 void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu) 4692 { 4693 struct vcpu_vmx *vmx; 4694 4695 if (!nested_vmx_allowed(vcpu)) 4696 return; 4697 4698 vmx = to_vmx(vcpu); 4699 if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) { 4700 vmx->nested.msrs.entry_ctls_high |= 4701 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 4702 vmx->nested.msrs.exit_ctls_high |= 4703 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 4704 } else { 4705 vmx->nested.msrs.entry_ctls_high &= 4706 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 4707 vmx->nested.msrs.exit_ctls_high &= 4708 ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 4709 } 4710 } 4711 4712 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer, 4713 int *ret) 4714 { 4715 gva_t gva; 4716 struct x86_exception e; 4717 int r; 4718 4719 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 4720 vmcs_read32(VMX_INSTRUCTION_INFO), false, 4721 sizeof(*vmpointer), &gva)) { 4722 *ret = 1; 4723 return -EINVAL; 4724 } 4725 4726 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e); 4727 if (r != X86EMUL_CONTINUE) { 4728 *ret = kvm_handle_memory_failure(vcpu, r, &e); 4729 return -EINVAL; 4730 } 4731 4732 return 0; 4733 } 4734 4735 /* 4736 * Allocate a shadow VMCS and associate it with the currently loaded 4737 * VMCS, unless such a shadow VMCS already exists. The newly allocated 4738 * VMCS is also VMCLEARed, so that it is ready for use. 4739 */ 4740 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) 4741 { 4742 struct vcpu_vmx *vmx = to_vmx(vcpu); 4743 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; 4744 4745 /* 4746 * We should allocate a shadow vmcs for vmcs01 only when L1 4747 * executes VMXON and free it when L1 executes VMXOFF. 4748 * As it is invalid to execute VMXON twice, we shouldn't reach 4749 * here when vmcs01 already have an allocated shadow vmcs. 
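 * The WARN_ON below flags that case; the existing shadow VMCS is then
 * simply reused rather than reallocated.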
4750 */ 4751 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); 4752 4753 if (!loaded_vmcs->shadow_vmcs) { 4754 loaded_vmcs->shadow_vmcs = alloc_vmcs(true); 4755 if (loaded_vmcs->shadow_vmcs) 4756 vmcs_clear(loaded_vmcs->shadow_vmcs); 4757 } 4758 return loaded_vmcs->shadow_vmcs; 4759 } 4760 4761 static int enter_vmx_operation(struct kvm_vcpu *vcpu) 4762 { 4763 struct vcpu_vmx *vmx = to_vmx(vcpu); 4764 int r; 4765 4766 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); 4767 if (r < 0) 4768 goto out_vmcs02; 4769 4770 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4771 if (!vmx->nested.cached_vmcs12) 4772 goto out_cached_vmcs12; 4773 4774 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4775 if (!vmx->nested.cached_shadow_vmcs12) 4776 goto out_cached_shadow_vmcs12; 4777 4778 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) 4779 goto out_shadow_vmcs; 4780 4781 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, 4782 HRTIMER_MODE_ABS_PINNED); 4783 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 4784 4785 vmx->nested.vpid02 = allocate_vpid(); 4786 4787 vmx->nested.vmcs02_initialized = false; 4788 vmx->nested.vmxon = true; 4789 4790 if (vmx_pt_mode_is_host_guest()) { 4791 vmx->pt_desc.guest.ctl = 0; 4792 pt_update_intercept_for_msr(vcpu); 4793 } 4794 4795 return 0; 4796 4797 out_shadow_vmcs: 4798 kfree(vmx->nested.cached_shadow_vmcs12); 4799 4800 out_cached_shadow_vmcs12: 4801 kfree(vmx->nested.cached_vmcs12); 4802 4803 out_cached_vmcs12: 4804 free_loaded_vmcs(&vmx->nested.vmcs02); 4805 4806 out_vmcs02: 4807 return -ENOMEM; 4808 } 4809 4810 /* 4811 * Emulate the VMXON instruction. 4812 * Currently, we just remember that VMX is active, and do not save or even 4813 * inspect the argument to VMXON (the so-called "VMXON pointer") because we 4814 * do not currently need to store anything in that guest-allocated memory 4815 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their 4816 * argument is different from the VMXON pointer (which the spec says they do). 4817 */ 4818 static int handle_vmon(struct kvm_vcpu *vcpu) 4819 { 4820 int ret; 4821 gpa_t vmptr; 4822 uint32_t revision; 4823 struct vcpu_vmx *vmx = to_vmx(vcpu); 4824 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED 4825 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; 4826 4827 /* 4828 * The Intel VMX Instruction Reference lists a bunch of bits that are 4829 * prerequisite to running VMXON, most notably cr4.VMXE must be set to 4830 * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this). 4831 * Otherwise, we should fail with #UD. But most faulting conditions 4832 * have already been checked by hardware, prior to the VM-exit for 4833 * VMXON. We do test guest cr4.VMXE because processor CR4 always has 4834 * that bit set to 1 in non-root mode. 4835 */ 4836 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { 4837 kvm_queue_exception(vcpu, UD_VECTOR); 4838 return 1; 4839 } 4840 4841 /* CPL=0 must be checked manually. 
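 * Unlike the conditions above, the CPL check is not performed by hardware
 * prior to the VMXON VM-exit, so KVM injects the #GP itself.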
*/ 4842 if (vmx_get_cpl(vcpu)) { 4843 kvm_inject_gp(vcpu, 0); 4844 return 1; 4845 } 4846 4847 if (vmx->nested.vmxon) 4848 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); 4849 4850 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) 4851 != VMXON_NEEDED_FEATURES) { 4852 kvm_inject_gp(vcpu, 0); 4853 return 1; 4854 } 4855 4856 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret)) 4857 return ret; 4858 4859 /* 4860 * SDM 3: 24.11.5 4861 * The first 4 bytes of VMXON region contain the supported 4862 * VMCS revision identifier 4863 * 4864 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; 4865 * which replaces physical address width with 32 4866 */ 4867 if (!page_address_valid(vcpu, vmptr)) 4868 return nested_vmx_failInvalid(vcpu); 4869 4870 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) || 4871 revision != VMCS12_REVISION) 4872 return nested_vmx_failInvalid(vcpu); 4873 4874 vmx->nested.vmxon_ptr = vmptr; 4875 ret = enter_vmx_operation(vcpu); 4876 if (ret) 4877 return ret; 4878 4879 return nested_vmx_succeed(vcpu); 4880 } 4881 4882 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) 4883 { 4884 struct vcpu_vmx *vmx = to_vmx(vcpu); 4885 4886 if (vmx->nested.current_vmptr == -1ull) 4887 return; 4888 4889 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 4890 4891 if (enable_shadow_vmcs) { 4892 /* copy to memory all shadowed fields in case 4893 they were modified */ 4894 copy_shadow_to_vmcs12(vmx); 4895 vmx_disable_shadow_vmcs(vmx); 4896 } 4897 vmx->nested.posted_intr_nv = -1; 4898 4899 /* Flush VMCS12 to guest memory */ 4900 kvm_vcpu_write_guest_page(vcpu, 4901 vmx->nested.current_vmptr >> PAGE_SHIFT, 4902 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); 4903 4904 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 4905 4906 vmx->nested.current_vmptr = -1ull; 4907 } 4908 4909 /* Emulate the VMXOFF instruction */ 4910 static int handle_vmoff(struct kvm_vcpu *vcpu) 4911 { 4912 if (!nested_vmx_check_permission(vcpu)) 4913 return 1; 4914 4915 free_nested(vcpu); 4916 4917 /* Process a latched INIT during time CPU was in VMX operation */ 4918 kvm_make_request(KVM_REQ_EVENT, vcpu); 4919 4920 return nested_vmx_succeed(vcpu); 4921 } 4922 4923 /* Emulate the VMCLEAR instruction */ 4924 static int handle_vmclear(struct kvm_vcpu *vcpu) 4925 { 4926 struct vcpu_vmx *vmx = to_vmx(vcpu); 4927 u32 zero = 0; 4928 gpa_t vmptr; 4929 u64 evmcs_gpa; 4930 int r; 4931 4932 if (!nested_vmx_check_permission(vcpu)) 4933 return 1; 4934 4935 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 4936 return r; 4937 4938 if (!page_address_valid(vcpu, vmptr)) 4939 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); 4940 4941 if (vmptr == vmx->nested.vmxon_ptr) 4942 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); 4943 4944 /* 4945 * When Enlightened VMEntry is enabled on the calling CPU we treat 4946 * memory area pointer by vmptr as Enlightened VMCS (as there's no good 4947 * way to distinguish it from VMCS12) and we must not corrupt it by 4948 * writing to the non-existent 'launch_state' field. The area doesn't 4949 * have to be the currently active EVMCS on the calling CPU and there's 4950 * nothing KVM has to do to transition it from 'active' to 'non-active' 4951 * state. It is possible that the area will stay mapped as 4952 * vmx->nested.hv_evmcs but this shouldn't be a problem. 
4953 */ 4954 if (likely(!vmx->nested.enlightened_vmcs_enabled || 4955 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) { 4956 if (vmptr == vmx->nested.current_vmptr) 4957 nested_release_vmcs12(vcpu); 4958 4959 kvm_vcpu_write_guest(vcpu, 4960 vmptr + offsetof(struct vmcs12, 4961 launch_state), 4962 &zero, sizeof(zero)); 4963 } 4964 4965 return nested_vmx_succeed(vcpu); 4966 } 4967 4968 /* Emulate the VMLAUNCH instruction */ 4969 static int handle_vmlaunch(struct kvm_vcpu *vcpu) 4970 { 4971 return nested_vmx_run(vcpu, true); 4972 } 4973 4974 /* Emulate the VMRESUME instruction */ 4975 static int handle_vmresume(struct kvm_vcpu *vcpu) 4976 { 4977 4978 return nested_vmx_run(vcpu, false); 4979 } 4980 4981 static int handle_vmread(struct kvm_vcpu *vcpu) 4982 { 4983 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 4984 : get_vmcs12(vcpu); 4985 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 4986 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4987 struct vcpu_vmx *vmx = to_vmx(vcpu); 4988 struct x86_exception e; 4989 unsigned long field; 4990 u64 value; 4991 gva_t gva = 0; 4992 short offset; 4993 int len, r; 4994 4995 if (!nested_vmx_check_permission(vcpu)) 4996 return 1; 4997 4998 /* 4999 * In VMX non-root operation, when the VMCS-link pointer is -1ull, 5000 * any VMREAD sets the ALU flags for VMfailInvalid. 5001 */ 5002 if (vmx->nested.current_vmptr == -1ull || 5003 (is_guest_mode(vcpu) && 5004 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) 5005 return nested_vmx_failInvalid(vcpu); 5006 5007 /* Decode instruction info and find the field to read */ 5008 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); 5009 5010 offset = vmcs_field_to_offset(field); 5011 if (offset < 0) 5012 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5013 5014 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field)) 5015 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5016 5017 /* Read the field, zero-extended to a u64 value */ 5018 value = vmcs12_read_any(vmcs12, field, offset); 5019 5020 /* 5021 * Now copy part of this value to register or memory, as requested. 5022 * Note that the number of bits actually copied is 32 or 64 depending 5023 * on the guest's mode (32 or 64 bit), not on the given field's length. 5024 */ 5025 if (instr_info & BIT(10)) { 5026 kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value); 5027 } else { 5028 len = is_64_bit_mode(vcpu) ? 8 : 4; 5029 if (get_vmx_mem_address(vcpu, exit_qualification, 5030 instr_info, true, len, &gva)) 5031 return 1; 5032 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ 5033 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e); 5034 if (r != X86EMUL_CONTINUE) 5035 return kvm_handle_memory_failure(vcpu, r, &e); 5036 } 5037 5038 return nested_vmx_succeed(vcpu); 5039 } 5040 5041 static bool is_shadow_field_rw(unsigned long field) 5042 { 5043 switch (field) { 5044 #define SHADOW_FIELD_RW(x, y) case x: 5045 #include "vmcs_shadow_fields.h" 5046 return true; 5047 default: 5048 break; 5049 } 5050 return false; 5051 } 5052 5053 static bool is_shadow_field_ro(unsigned long field) 5054 { 5055 switch (field) { 5056 #define SHADOW_FIELD_RO(x, y) case x: 5057 #include "vmcs_shadow_fields.h" 5058 return true; 5059 default: 5060 break; 5061 } 5062 return false; 5063 } 5064 5065 static int handle_vmwrite(struct kvm_vcpu *vcpu) 5066 { 5067 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? 
get_shadow_vmcs12(vcpu) 5068 : get_vmcs12(vcpu); 5069 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5070 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5071 struct vcpu_vmx *vmx = to_vmx(vcpu); 5072 struct x86_exception e; 5073 unsigned long field; 5074 short offset; 5075 gva_t gva; 5076 int len, r; 5077 5078 /* 5079 * The value to write might be 32 or 64 bits, depending on L1's long 5080 * mode, and eventually we need to write that into a field of several 5081 * possible lengths. The code below first zero-extends the value to 64 5082 * bit (value), and then copies only the appropriate number of 5083 * bits into the vmcs12 field. 5084 */ 5085 u64 value = 0; 5086 5087 if (!nested_vmx_check_permission(vcpu)) 5088 return 1; 5089 5090 /* 5091 * In VMX non-root operation, when the VMCS-link pointer is -1ull, 5092 * any VMWRITE sets the ALU flags for VMfailInvalid. 5093 */ 5094 if (vmx->nested.current_vmptr == -1ull || 5095 (is_guest_mode(vcpu) && 5096 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) 5097 return nested_vmx_failInvalid(vcpu); 5098 5099 if (instr_info & BIT(10)) 5100 value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf)); 5101 else { 5102 len = is_64_bit_mode(vcpu) ? 8 : 4; 5103 if (get_vmx_mem_address(vcpu, exit_qualification, 5104 instr_info, false, len, &gva)) 5105 return 1; 5106 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e); 5107 if (r != X86EMUL_CONTINUE) 5108 return kvm_handle_memory_failure(vcpu, r, &e); 5109 } 5110 5111 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); 5112 5113 offset = vmcs_field_to_offset(field); 5114 if (offset < 0) 5115 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); 5116 5117 /* 5118 * If the vCPU supports "VMWRITE to any supported field in the 5119 * VMCS," then the "read-only" fields are actually read/write. 5120 */ 5121 if (vmcs_field_readonly(field) && 5122 !nested_cpu_has_vmwrite_any_field(vcpu)) 5123 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); 5124 5125 /* 5126 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties 5127 * vmcs12, else we may crush a field or consume a stale value. 5128 */ 5129 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) 5130 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5131 5132 /* 5133 * Some Intel CPUs intentionally drop the reserved bits of the AR byte 5134 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM 5135 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE 5136 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD 5137 * from L1 will return a different value than VMREAD from L2 (L1 sees 5138 * the stripped down value, L2 sees the full value as stored by KVM). 5139 */ 5140 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES) 5141 value &= 0x1f0ff; 5142 5143 vmcs12_write_any(vmcs12, field, offset, value); 5144 5145 /* 5146 * Do not track vmcs12 dirty-state if in guest-mode as we actually 5147 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated 5148 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't 5149 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path. 5150 */ 5151 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) { 5152 /* 5153 * L1 can read these fields without exiting, ensure the 5154 * shadow VMCS is up-to-date. 
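* The value is written into vmcs01's shadow VMCS below (under preemption protection) so that a subsequent VMREAD by L1, which does not VM-exit while shadowing is enabled, observes the value L1 just wrote.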
5155 */ 5156 if (enable_shadow_vmcs && is_shadow_field_ro(field)) { 5157 preempt_disable(); 5158 vmcs_load(vmx->vmcs01.shadow_vmcs); 5159 5160 __vmcs_writel(field, value); 5161 5162 vmcs_clear(vmx->vmcs01.shadow_vmcs); 5163 vmcs_load(vmx->loaded_vmcs->vmcs); 5164 preempt_enable(); 5165 } 5166 vmx->nested.dirty_vmcs12 = true; 5167 } 5168 5169 return nested_vmx_succeed(vcpu); 5170 } 5171 5172 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) 5173 { 5174 vmx->nested.current_vmptr = vmptr; 5175 if (enable_shadow_vmcs) { 5176 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 5177 vmcs_write64(VMCS_LINK_POINTER, 5178 __pa(vmx->vmcs01.shadow_vmcs)); 5179 vmx->nested.need_vmcs12_to_shadow_sync = true; 5180 } 5181 vmx->nested.dirty_vmcs12 = true; 5182 } 5183 5184 /* Emulate the VMPTRLD instruction */ 5185 static int handle_vmptrld(struct kvm_vcpu *vcpu) 5186 { 5187 struct vcpu_vmx *vmx = to_vmx(vcpu); 5188 gpa_t vmptr; 5189 int r; 5190 5191 if (!nested_vmx_check_permission(vcpu)) 5192 return 1; 5193 5194 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r)) 5195 return r; 5196 5197 if (!page_address_valid(vcpu, vmptr)) 5198 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); 5199 5200 if (vmptr == vmx->nested.vmxon_ptr) 5201 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); 5202 5203 /* Forbid normal VMPTRLD if Enlightened version was used */ 5204 if (vmx->nested.hv_evmcs) 5205 return 1; 5206 5207 if (vmx->nested.current_vmptr != vmptr) { 5208 struct kvm_host_map map; 5209 struct vmcs12 *new_vmcs12; 5210 5211 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) { 5212 /* 5213 * Reads from an unbacked page return all 1s, 5214 * which means that the 32 bits located at the 5215 * given physical address won't match the required 5216 * VMCS12_REVISION identifier. 5217 */ 5218 return nested_vmx_fail(vcpu, 5219 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5220 } 5221 5222 new_vmcs12 = map.hva; 5223 5224 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || 5225 (new_vmcs12->hdr.shadow_vmcs && 5226 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { 5227 kvm_vcpu_unmap(vcpu, &map, false); 5228 return nested_vmx_fail(vcpu, 5229 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5230 } 5231 5232 nested_release_vmcs12(vcpu); 5233 5234 /* 5235 * Load VMCS12 from guest memory since it is not already 5236 * cached. 
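* From this point on, VMREAD/VMWRITE emulation operates on the cached copy; it is flushed back to guest memory by nested_release_vmcs12().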
5237 */ 5238 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); 5239 kvm_vcpu_unmap(vcpu, &map, false); 5240 5241 set_current_vmptr(vmx, vmptr); 5242 } 5243 5244 return nested_vmx_succeed(vcpu); 5245 } 5246 5247 /* Emulate the VMPTRST instruction */ 5248 static int handle_vmptrst(struct kvm_vcpu *vcpu) 5249 { 5250 unsigned long exit_qual = vmx_get_exit_qual(vcpu); 5251 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5252 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; 5253 struct x86_exception e; 5254 gva_t gva; 5255 int r; 5256 5257 if (!nested_vmx_check_permission(vcpu)) 5258 return 1; 5259 5260 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs)) 5261 return 1; 5262 5263 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, 5264 true, sizeof(gpa_t), &gva)) 5265 return 1; 5266 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ 5267 r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr, 5268 sizeof(gpa_t), &e); 5269 if (r != X86EMUL_CONTINUE) 5270 return kvm_handle_memory_failure(vcpu, r, &e); 5271 5272 return nested_vmx_succeed(vcpu); 5273 } 5274 5275 #define EPTP_PA_MASK GENMASK_ULL(51, 12) 5276 5277 static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp) 5278 { 5279 return VALID_PAGE(root_hpa) && 5280 ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK)); 5281 } 5282 5283 /* Emulate the INVEPT instruction */ 5284 static int handle_invept(struct kvm_vcpu *vcpu) 5285 { 5286 struct vcpu_vmx *vmx = to_vmx(vcpu); 5287 u32 vmx_instruction_info, types; 5288 unsigned long type, roots_to_free; 5289 struct kvm_mmu *mmu; 5290 gva_t gva; 5291 struct x86_exception e; 5292 struct { 5293 u64 eptp, gpa; 5294 } operand; 5295 int i, r; 5296 5297 if (!(vmx->nested.msrs.secondary_ctls_high & 5298 SECONDARY_EXEC_ENABLE_EPT) || 5299 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { 5300 kvm_queue_exception(vcpu, UD_VECTOR); 5301 return 1; 5302 } 5303 5304 if (!nested_vmx_check_permission(vcpu)) 5305 return 1; 5306 5307 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5308 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 5309 5310 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 5311 5312 if (type >= 32 || !(types & (1 << type))) 5313 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5314 5315 /* According to the Intel VMX instruction reference, the memory 5316 * operand is read even if it isn't needed (e.g., for type==global) 5317 */ 5318 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5319 vmx_instruction_info, false, sizeof(operand), &gva)) 5320 return 1; 5321 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5322 if (r != X86EMUL_CONTINUE) 5323 return kvm_handle_memory_failure(vcpu, r, &e); 5324 5325 /* 5326 * Nested EPT roots are always held through guest_mmu, 5327 * not root_mmu.
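* root_mmu tracks L1's own (or a non-nested guest's) paging, so an INVEPT issued by L1 only needs to free roots that were cached for L2.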
5328 */ 5329 mmu = &vcpu->arch.guest_mmu; 5330 5331 switch (type) { 5332 case VMX_EPT_EXTENT_CONTEXT: 5333 if (!nested_vmx_check_eptp(vcpu, operand.eptp)) 5334 return nested_vmx_fail(vcpu, 5335 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5336 5337 roots_to_free = 0; 5338 if (nested_ept_root_matches(mmu->root_hpa, mmu->root_pgd, 5339 operand.eptp)) 5340 roots_to_free |= KVM_MMU_ROOT_CURRENT; 5341 5342 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 5343 if (nested_ept_root_matches(mmu->prev_roots[i].hpa, 5344 mmu->prev_roots[i].pgd, 5345 operand.eptp)) 5346 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); 5347 } 5348 break; 5349 case VMX_EPT_EXTENT_GLOBAL: 5350 roots_to_free = KVM_MMU_ROOTS_ALL; 5351 break; 5352 default: 5353 BUG(); 5354 break; 5355 } 5356 5357 if (roots_to_free) 5358 kvm_mmu_free_roots(vcpu, mmu, roots_to_free); 5359 5360 return nested_vmx_succeed(vcpu); 5361 } 5362 5363 static int handle_invvpid(struct kvm_vcpu *vcpu) 5364 { 5365 struct vcpu_vmx *vmx = to_vmx(vcpu); 5366 u32 vmx_instruction_info; 5367 unsigned long type, types; 5368 gva_t gva; 5369 struct x86_exception e; 5370 struct { 5371 u64 vpid; 5372 u64 gla; 5373 } operand; 5374 u16 vpid02; 5375 int r; 5376 5377 if (!(vmx->nested.msrs.secondary_ctls_high & 5378 SECONDARY_EXEC_ENABLE_VPID) || 5379 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { 5380 kvm_queue_exception(vcpu, UD_VECTOR); 5381 return 1; 5382 } 5383 5384 if (!nested_vmx_check_permission(vcpu)) 5385 return 1; 5386 5387 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5388 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 5389 5390 types = (vmx->nested.msrs.vpid_caps & 5391 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; 5392 5393 if (type >= 32 || !(types & (1 << type))) 5394 return nested_vmx_fail(vcpu, 5395 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5396 5397 /* according to the intel vmx instruction reference, the memory 5398 * operand is read even if it isn't needed (e.g., for type==global) 5399 */ 5400 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu), 5401 vmx_instruction_info, false, sizeof(operand), &gva)) 5402 return 1; 5403 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); 5404 if (r != X86EMUL_CONTINUE) 5405 return kvm_handle_memory_failure(vcpu, r, &e); 5406 5407 if (operand.vpid >> 16) 5408 return nested_vmx_fail(vcpu, 5409 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5410 5411 vpid02 = nested_get_vpid02(vcpu); 5412 switch (type) { 5413 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: 5414 if (!operand.vpid || 5415 is_noncanonical_address(operand.gla, vcpu)) 5416 return nested_vmx_fail(vcpu, 5417 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5418 vpid_sync_vcpu_addr(vpid02, operand.gla); 5419 break; 5420 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 5421 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: 5422 if (!operand.vpid) 5423 return nested_vmx_fail(vcpu, 5424 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5425 vpid_sync_context(vpid02); 5426 break; 5427 case VMX_VPID_EXTENT_ALL_CONTEXT: 5428 vpid_sync_context(vpid02); 5429 break; 5430 default: 5431 WARN_ON_ONCE(1); 5432 return kvm_skip_emulated_instruction(vcpu); 5433 } 5434 5435 /* 5436 * Sync the shadow page tables if EPT is disabled, L1 is invalidating 5437 * linear mappings for L2 (tagged with L2's VPID). Free all roots as 5438 * VPIDs are not tracked in the MMU role. 5439 * 5440 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share 5441 * an MMU when EPT is disabled. 5442 * 5443 * TODO: sync only the affected SPTEs for INVDIVIDUAL_ADDR. 
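* Until then, any INVVPID from L1 frees all of root_mmu's roots when shadow paging is in use, which is correct but coarser than strictly necessary.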
5444 */ 5445 if (!enable_ept) 5446 kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, 5447 KVM_MMU_ROOTS_ALL); 5448 5449 return nested_vmx_succeed(vcpu); 5450 } 5451 5452 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, 5453 struct vmcs12 *vmcs12) 5454 { 5455 u32 index = kvm_rcx_read(vcpu); 5456 u64 new_eptp; 5457 bool accessed_dirty; 5458 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 5459 5460 if (!nested_cpu_has_eptp_switching(vmcs12) || 5461 !nested_cpu_has_ept(vmcs12)) 5462 return 1; 5463 5464 if (index >= VMFUNC_EPTP_ENTRIES) 5465 return 1; 5466 5467 5468 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, 5469 &new_eptp, index * 8, 8)) 5470 return 1; 5471 5472 accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT); 5473 5474 /* 5475 * If the (L2) guest does a vmfunc to the currently 5476 * active ept pointer, we don't have to do anything else 5477 */ 5478 if (vmcs12->ept_pointer != new_eptp) { 5479 if (!nested_vmx_check_eptp(vcpu, new_eptp)) 5480 return 1; 5481 5482 kvm_mmu_unload(vcpu); 5483 mmu->ept_ad = accessed_dirty; 5484 mmu->mmu_role.base.ad_disabled = !accessed_dirty; 5485 vmcs12->ept_pointer = new_eptp; 5486 /* 5487 * TODO: Check what's the correct approach in case 5488 * mmu reload fails. Currently, we just let the next 5489 * reload potentially fail 5490 */ 5491 kvm_mmu_reload(vcpu); 5492 } 5493 5494 return 0; 5495 } 5496 5497 static int handle_vmfunc(struct kvm_vcpu *vcpu) 5498 { 5499 struct vcpu_vmx *vmx = to_vmx(vcpu); 5500 struct vmcs12 *vmcs12; 5501 u32 function = kvm_rax_read(vcpu); 5502 5503 /* 5504 * VMFUNC is only supported for nested guests, but we always enable the 5505 * secondary control for simplicity; for non-nested mode, fake that we 5506 * didn't by injecting #UD. 5507 */ 5508 if (!is_guest_mode(vcpu)) { 5509 kvm_queue_exception(vcpu, UD_VECTOR); 5510 return 1; 5511 } 5512 5513 vmcs12 = get_vmcs12(vcpu); 5514 if ((vmcs12->vm_function_control & (1 << function)) == 0) 5515 goto fail; 5516 5517 switch (function) { 5518 case 0: 5519 if (nested_vmx_eptp_switching(vcpu, vmcs12)) 5520 goto fail; 5521 break; 5522 default: 5523 goto fail; 5524 } 5525 return kvm_skip_emulated_instruction(vcpu); 5526 5527 fail: 5528 /* 5529 * This is effectively a reflected VM-Exit, as opposed to a synthesized 5530 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode 5531 * EXIT_REASON_VMFUNC as the exit reason. 5532 */ 5533 nested_vmx_vmexit(vcpu, vmx->exit_reason.full, 5534 vmx_get_intr_info(vcpu), 5535 vmx_get_exit_qual(vcpu)); 5536 return 1; 5537 } 5538 5539 /* 5540 * Return true if an IO instruction with the specified port and size should cause 5541 * a VM-exit into L1. 
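* For example, a two-byte access to port 0x3f8 tests bit 0 of byte 0x7f in io_bitmap_a and then bit 1 of the same byte for port 0x3f9; ports 0x8000 and above are looked up in io_bitmap_b instead.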
5542 */ 5543 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, 5544 int size) 5545 { 5546 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5547 gpa_t bitmap, last_bitmap; 5548 u8 b; 5549 5550 last_bitmap = (gpa_t)-1; 5551 b = -1; 5552 5553 while (size > 0) { 5554 if (port < 0x8000) 5555 bitmap = vmcs12->io_bitmap_a; 5556 else if (port < 0x10000) 5557 bitmap = vmcs12->io_bitmap_b; 5558 else 5559 return true; 5560 bitmap += (port & 0x7fff) / 8; 5561 5562 if (last_bitmap != bitmap) 5563 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) 5564 return true; 5565 if (b & (1 << (port & 7))) 5566 return true; 5567 5568 port++; 5569 size--; 5570 last_bitmap = bitmap; 5571 } 5572 5573 return false; 5574 } 5575 5576 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, 5577 struct vmcs12 *vmcs12) 5578 { 5579 unsigned long exit_qualification; 5580 unsigned short port; 5581 int size; 5582 5583 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 5584 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); 5585 5586 exit_qualification = vmx_get_exit_qual(vcpu); 5587 5588 port = exit_qualification >> 16; 5589 size = (exit_qualification & 7) + 1; 5590 5591 return nested_vmx_check_io_bitmaps(vcpu, port, size); 5592 } 5593 5594 /* 5595 * Return 1 if we should exit from L2 to L1 to handle an MSR access, 5596 * rather than handle it ourselves in L0. I.e., check whether L1 expressed 5597 * disinterest in the current event (read or write a specific MSR) by using an 5598 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 5599 */ 5600 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, 5601 struct vmcs12 *vmcs12, 5602 union vmx_exit_reason exit_reason) 5603 { 5604 u32 msr_index = kvm_rcx_read(vcpu); 5605 gpa_t bitmap; 5606 5607 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 5608 return true; 5609 5610 /* 5611 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, 5612 * for the four combinations of read/write and low/high MSR numbers. 5613 * First we need to figure out which of the four to use: 5614 */ 5615 bitmap = vmcs12->msr_bitmap; 5616 if (exit_reason.basic == EXIT_REASON_MSR_WRITE) 5617 bitmap += 2048; 5618 if (msr_index >= 0xc0000000) { 5619 msr_index -= 0xc0000000; 5620 bitmap += 1024; 5621 } 5622 5623 /* Then read the msr_index'th bit from this bitmap: */ 5624 if (msr_index < 1024*8) { 5625 unsigned char b; 5626 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) 5627 return true; 5628 return 1 & (b >> (msr_index & 7)); 5629 } else 5630 return true; /* let L1 handle the wrong parameter */ 5631 } 5632 5633 /* 5634 * Return 1 if we should exit from L2 to L1 to handle a CR access exit, 5635 * rather than handle it ourselves in L0. I.e., check if L1 wanted to 5636 * intercept (via guest_host_mask etc.) the current event. 
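* For example, if L1 owns CR0.TS (the bit is set in cr0_guest_host_mask) and the read shadow has TS=1, a CLTS executed by L2 is reflected to L1; otherwise L0 emulates it directly.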
5637 */ 5638 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, 5639 struct vmcs12 *vmcs12) 5640 { 5641 unsigned long exit_qualification = vmx_get_exit_qual(vcpu); 5642 int cr = exit_qualification & 15; 5643 int reg; 5644 unsigned long val; 5645 5646 switch ((exit_qualification >> 4) & 3) { 5647 case 0: /* mov to cr */ 5648 reg = (exit_qualification >> 8) & 15; 5649 val = kvm_register_readl(vcpu, reg); 5650 switch (cr) { 5651 case 0: 5652 if (vmcs12->cr0_guest_host_mask & 5653 (val ^ vmcs12->cr0_read_shadow)) 5654 return true; 5655 break; 5656 case 3: 5657 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) 5658 return true; 5659 break; 5660 case 4: 5661 if (vmcs12->cr4_guest_host_mask & 5662 (vmcs12->cr4_read_shadow ^ val)) 5663 return true; 5664 break; 5665 case 8: 5666 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) 5667 return true; 5668 break; 5669 } 5670 break; 5671 case 2: /* clts */ 5672 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && 5673 (vmcs12->cr0_read_shadow & X86_CR0_TS)) 5674 return true; 5675 break; 5676 case 1: /* mov from cr */ 5677 switch (cr) { 5678 case 3: 5679 if (vmcs12->cpu_based_vm_exec_control & 5680 CPU_BASED_CR3_STORE_EXITING) 5681 return true; 5682 break; 5683 case 8: 5684 if (vmcs12->cpu_based_vm_exec_control & 5685 CPU_BASED_CR8_STORE_EXITING) 5686 return true; 5687 break; 5688 } 5689 break; 5690 case 3: /* lmsw */ 5691 /* 5692 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 5693 * cr0. Other attempted changes are ignored, with no exit. 5694 */ 5695 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 5696 if (vmcs12->cr0_guest_host_mask & 0xe & 5697 (val ^ vmcs12->cr0_read_shadow)) 5698 return true; 5699 if ((vmcs12->cr0_guest_host_mask & 0x1) && 5700 !(vmcs12->cr0_read_shadow & 0x1) && 5701 (val & 0x1)) 5702 return true; 5703 break; 5704 } 5705 return false; 5706 } 5707 5708 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, 5709 struct vmcs12 *vmcs12, gpa_t bitmap) 5710 { 5711 u32 vmx_instruction_info; 5712 unsigned long field; 5713 u8 b; 5714 5715 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 5716 return true; 5717 5718 /* Decode instruction info and find the field to access */ 5719 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5720 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 5721 5722 /* Out-of-range fields always cause a VM exit from L2 to L1 */ 5723 if (field >> 15) 5724 return true; 5725 5726 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) 5727 return true; 5728 5729 return 1 & (b >> (field & 7)); 5730 } 5731 5732 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12) 5733 { 5734 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field; 5735 5736 if (nested_cpu_has_mtf(vmcs12)) 5737 return true; 5738 5739 /* 5740 * An MTF VM-exit may be injected into the guest by setting the 5741 * interruption-type to 7 (other event) and the vector field to 0. Such 5742 * is the case regardless of the 'monitor trap flag' VM-execution 5743 * control. 5744 */ 5745 return entry_intr_info == (INTR_INFO_VALID_MASK 5746 | INTR_TYPE_OTHER_EVENT); 5747 } 5748 5749 /* 5750 * Return true if L0 wants to handle an exit from L2 regardless of whether or not 5751 * L1 wants the exit. Only call this when in is_guest_mode (L2). 
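* For example, a page fault taken while a host async page fault is pending, or while shadow paging is in use, is always handled by L0 first, even if L1's exception bitmap would otherwise intercept #PF.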
5752 */ 5753 static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, 5754 union vmx_exit_reason exit_reason) 5755 { 5756 u32 intr_info; 5757 5758 switch ((u16)exit_reason.basic) { 5759 case EXIT_REASON_EXCEPTION_NMI: 5760 intr_info = vmx_get_intr_info(vcpu); 5761 if (is_nmi(intr_info)) 5762 return true; 5763 else if (is_page_fault(intr_info)) 5764 return vcpu->arch.apf.host_apf_flags || !enable_ept; 5765 else if (is_debug(intr_info) && 5766 vcpu->guest_debug & 5767 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 5768 return true; 5769 else if (is_breakpoint(intr_info) && 5770 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 5771 return true; 5772 return false; 5773 case EXIT_REASON_EXTERNAL_INTERRUPT: 5774 return true; 5775 case EXIT_REASON_MCE_DURING_VMENTRY: 5776 return true; 5777 case EXIT_REASON_EPT_VIOLATION: 5778 /* 5779 * L0 always deals with the EPT violation. If nested EPT is 5780 * used, and the nested mmu code discovers that the address is 5781 * missing in the guest EPT table (EPT12), the EPT violation 5782 * will be injected with nested_ept_inject_page_fault() 5783 */ 5784 return true; 5785 case EXIT_REASON_EPT_MISCONFIG: 5786 /* 5787 * L2 never uses directly L1's EPT, but rather L0's own EPT 5788 * table (shadow on EPT) or a merged EPT table that L0 built 5789 * (EPT on EPT). So any problems with the structure of the 5790 * table is L0's fault. 5791 */ 5792 return true; 5793 case EXIT_REASON_PREEMPTION_TIMER: 5794 return true; 5795 case EXIT_REASON_PML_FULL: 5796 /* 5797 * PML is emulated for an L1 VMM and should never be enabled in 5798 * vmcs02, always "handle" PML_FULL by exiting to userspace. 5799 */ 5800 return true; 5801 case EXIT_REASON_VMFUNC: 5802 /* VM functions are emulated through L2->L0 vmexits. */ 5803 return true; 5804 case EXIT_REASON_ENCLS: 5805 /* SGX is never exposed to L1 */ 5806 return true; 5807 default: 5808 break; 5809 } 5810 return false; 5811 } 5812 5813 /* 5814 * Return 1 if L1 wants to intercept an exit from L2. Only call this when in 5815 * is_guest_mode (L2). 
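* For example, CPUID and task-switch exits are always forwarded to L1, whereas HLT is forwarded only if L1 enabled CPU_BASED_HLT_EXITING in vmcs12.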
5816 */ 5817 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, 5818 union vmx_exit_reason exit_reason) 5819 { 5820 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5821 u32 intr_info; 5822 5823 switch ((u16)exit_reason.basic) { 5824 case EXIT_REASON_EXCEPTION_NMI: 5825 intr_info = vmx_get_intr_info(vcpu); 5826 if (is_nmi(intr_info)) 5827 return true; 5828 else if (is_page_fault(intr_info)) 5829 return true; 5830 return vmcs12->exception_bitmap & 5831 (1u << (intr_info & INTR_INFO_VECTOR_MASK)); 5832 case EXIT_REASON_EXTERNAL_INTERRUPT: 5833 return nested_exit_on_intr(vcpu); 5834 case EXIT_REASON_TRIPLE_FAULT: 5835 return true; 5836 case EXIT_REASON_INTERRUPT_WINDOW: 5837 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING); 5838 case EXIT_REASON_NMI_WINDOW: 5839 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING); 5840 case EXIT_REASON_TASK_SWITCH: 5841 return true; 5842 case EXIT_REASON_CPUID: 5843 return true; 5844 case EXIT_REASON_HLT: 5845 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); 5846 case EXIT_REASON_INVD: 5847 return true; 5848 case EXIT_REASON_INVLPG: 5849 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 5850 case EXIT_REASON_RDPMC: 5851 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); 5852 case EXIT_REASON_RDRAND: 5853 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); 5854 case EXIT_REASON_RDSEED: 5855 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); 5856 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: 5857 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); 5858 case EXIT_REASON_VMREAD: 5859 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5860 vmcs12->vmread_bitmap); 5861 case EXIT_REASON_VMWRITE: 5862 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5863 vmcs12->vmwrite_bitmap); 5864 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: 5865 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: 5866 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: 5867 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 5868 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: 5869 /* 5870 * VMX instructions trap unconditionally. This allows L1 to 5871 * emulate them for its L2 guest, i.e., allows 3-level nesting! 
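* For example, when L2 itself runs a hypervisor, every VMLAUNCH it executes exits here and is handed to L1, which emulates it on behalf of its own (L3) guest.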
5872 */ 5873 return true; 5874 case EXIT_REASON_CR_ACCESS: 5875 return nested_vmx_exit_handled_cr(vcpu, vmcs12); 5876 case EXIT_REASON_DR_ACCESS: 5877 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); 5878 case EXIT_REASON_IO_INSTRUCTION: 5879 return nested_vmx_exit_handled_io(vcpu, vmcs12); 5880 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: 5881 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); 5882 case EXIT_REASON_MSR_READ: 5883 case EXIT_REASON_MSR_WRITE: 5884 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); 5885 case EXIT_REASON_INVALID_STATE: 5886 return true; 5887 case EXIT_REASON_MWAIT_INSTRUCTION: 5888 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); 5889 case EXIT_REASON_MONITOR_TRAP_FLAG: 5890 return nested_vmx_exit_handled_mtf(vmcs12); 5891 case EXIT_REASON_MONITOR_INSTRUCTION: 5892 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); 5893 case EXIT_REASON_PAUSE_INSTRUCTION: 5894 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || 5895 nested_cpu_has2(vmcs12, 5896 SECONDARY_EXEC_PAUSE_LOOP_EXITING); 5897 case EXIT_REASON_MCE_DURING_VMENTRY: 5898 return true; 5899 case EXIT_REASON_TPR_BELOW_THRESHOLD: 5900 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); 5901 case EXIT_REASON_APIC_ACCESS: 5902 case EXIT_REASON_APIC_WRITE: 5903 case EXIT_REASON_EOI_INDUCED: 5904 /* 5905 * The controls for "virtualize APIC accesses," "APIC- 5906 * register virtualization," and "virtual-interrupt 5907 * delivery" only come from vmcs12. 5908 */ 5909 return true; 5910 case EXIT_REASON_INVPCID: 5911 return 5912 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && 5913 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 5914 case EXIT_REASON_WBINVD: 5915 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); 5916 case EXIT_REASON_XSETBV: 5917 return true; 5918 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: 5919 /* 5920 * This should never happen, since it is not possible to 5921 * set XSS to a non-zero value---neither in L1 nor in L2. 5922 * If it were, XSS would have to be checked against 5923 * the XSS exit bitmap in vmcs12. 5924 */ 5925 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); 5926 case EXIT_REASON_UMWAIT: 5927 case EXIT_REASON_TPAUSE: 5928 return nested_cpu_has2(vmcs12, 5929 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE); 5930 default: 5931 return true; 5932 } 5933 } 5934 5935 /* 5936 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was 5937 * reflected into L1. 5938 */ 5939 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu) 5940 { 5941 struct vcpu_vmx *vmx = to_vmx(vcpu); 5942 union vmx_exit_reason exit_reason = vmx->exit_reason; 5943 unsigned long exit_qual; 5944 u32 exit_intr_info; 5945 5946 WARN_ON_ONCE(vmx->nested.nested_run_pending); 5947 5948 /* 5949 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM 5950 * has already loaded L2's state. 5951 */ 5952 if (unlikely(vmx->fail)) { 5953 trace_kvm_nested_vmenter_failed( 5954 "hardware VM-instruction error: ", 5955 vmcs_read32(VM_INSTRUCTION_ERROR)); 5956 exit_intr_info = 0; 5957 exit_qual = 0; 5958 goto reflect_vmexit; 5959 } 5960 5961 trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX); 5962 5963 /* If L0 (KVM) wants the exit, it trumps L1's desires. */ 5964 if (nested_vmx_l0_wants_exit(vcpu, exit_reason)) 5965 return false; 5966 5967 /* If L1 doesn't want the exit, handle it in L0.
*/ 5968 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason)) 5969 return false; 5970 5971 /* 5972 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For 5973 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would 5974 * need to be synthesized by querying the in-kernel LAPIC, but external 5975 * interrupts are never reflected to L1 so it's a non-issue. 5976 */ 5977 exit_intr_info = vmx_get_intr_info(vcpu); 5978 if (is_exception_with_error_code(exit_intr_info)) { 5979 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5980 5981 vmcs12->vm_exit_intr_error_code = 5982 vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 5983 } 5984 exit_qual = vmx_get_exit_qual(vcpu); 5985 5986 reflect_vmexit: 5987 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual); 5988 return true; 5989 } 5990 5991 static int vmx_get_nested_state(struct kvm_vcpu *vcpu, 5992 struct kvm_nested_state __user *user_kvm_nested_state, 5993 u32 user_data_size) 5994 { 5995 struct vcpu_vmx *vmx; 5996 struct vmcs12 *vmcs12; 5997 struct kvm_nested_state kvm_state = { 5998 .flags = 0, 5999 .format = KVM_STATE_NESTED_FORMAT_VMX, 6000 .size = sizeof(kvm_state), 6001 .hdr.vmx.flags = 0, 6002 .hdr.vmx.vmxon_pa = -1ull, 6003 .hdr.vmx.vmcs12_pa = -1ull, 6004 .hdr.vmx.preemption_timer_deadline = 0, 6005 }; 6006 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6007 &user_kvm_nested_state->data.vmx[0]; 6008 6009 if (!vcpu) 6010 return kvm_state.size + sizeof(*user_vmx_nested_state); 6011 6012 vmx = to_vmx(vcpu); 6013 vmcs12 = get_vmcs12(vcpu); 6014 6015 if (nested_vmx_allowed(vcpu) && 6016 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { 6017 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; 6018 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; 6019 6020 if (vmx_has_valid_vmcs12(vcpu)) { 6021 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); 6022 6023 if (vmx->nested.hv_evmcs) 6024 kvm_state.flags |= KVM_STATE_NESTED_EVMCS; 6025 6026 if (is_guest_mode(vcpu) && 6027 nested_cpu_has_shadow_vmcs(vmcs12) && 6028 vmcs12->vmcs_link_pointer != -1ull) 6029 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); 6030 } 6031 6032 if (vmx->nested.smm.vmxon) 6033 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; 6034 6035 if (vmx->nested.smm.guest_mode) 6036 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; 6037 6038 if (is_guest_mode(vcpu)) { 6039 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; 6040 6041 if (vmx->nested.nested_run_pending) 6042 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; 6043 6044 if (vmx->nested.mtf_pending) 6045 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING; 6046 6047 if (nested_cpu_has_preemption_timer(vmcs12) && 6048 vmx->nested.has_preemption_timer_deadline) { 6049 kvm_state.hdr.vmx.flags |= 6050 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE; 6051 kvm_state.hdr.vmx.preemption_timer_deadline = 6052 vmx->nested.preemption_timer_deadline; 6053 } 6054 } 6055 } 6056 6057 if (user_data_size < kvm_state.size) 6058 goto out; 6059 6060 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) 6061 return -EFAULT; 6062 6063 if (!vmx_has_valid_vmcs12(vcpu)) 6064 goto out; 6065 6066 /* 6067 * When running L2, the authoritative vmcs12 state is in the 6068 * vmcs02. When running L1, the authoritative vmcs12 state is 6069 * in the shadow or enlightened vmcs linked to vmcs01, unless 6070 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative 6071 * vmcs12 state is in the vmcs12 already. 
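* For example, saving state while L2 is running requires syncing the vmcs02 back into the cached vmcs12 (below) before that cache is copied out to userspace.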
6072 */ 6073 if (is_guest_mode(vcpu)) { 6074 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 6075 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 6076 } else { 6077 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 6078 if (!vmx->nested.need_vmcs12_to_shadow_sync) { 6079 if (vmx->nested.hv_evmcs) 6080 copy_enlightened_to_vmcs12(vmx); 6081 else if (enable_shadow_vmcs) 6082 copy_shadow_to_vmcs12(vmx); 6083 } 6084 } 6085 6086 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); 6087 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE); 6088 6089 /* 6090 * Copy over the full allocated size of vmcs12 rather than just the size 6091 * of the struct. 6092 */ 6093 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE)) 6094 return -EFAULT; 6095 6096 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6097 vmcs12->vmcs_link_pointer != -1ull) { 6098 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12, 6099 get_shadow_vmcs12(vcpu), VMCS12_SIZE)) 6100 return -EFAULT; 6101 } 6102 out: 6103 return kvm_state.size; 6104 } 6105 6106 /* 6107 * Forcibly leave nested mode in order to be able to reset the VCPU later on. 6108 */ 6109 void vmx_leave_nested(struct kvm_vcpu *vcpu) 6110 { 6111 if (is_guest_mode(vcpu)) { 6112 to_vmx(vcpu)->nested.nested_run_pending = 0; 6113 nested_vmx_vmexit(vcpu, -1, 0, 0); 6114 } 6115 free_nested(vcpu); 6116 } 6117 6118 static int vmx_set_nested_state(struct kvm_vcpu *vcpu, 6119 struct kvm_nested_state __user *user_kvm_nested_state, 6120 struct kvm_nested_state *kvm_state) 6121 { 6122 struct vcpu_vmx *vmx = to_vmx(vcpu); 6123 struct vmcs12 *vmcs12; 6124 enum vm_entry_failure_code ignored; 6125 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6126 &user_kvm_nested_state->data.vmx[0]; 6127 int ret; 6128 6129 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX) 6130 return -EINVAL; 6131 6132 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) { 6133 if (kvm_state->hdr.vmx.smm.flags) 6134 return -EINVAL; 6135 6136 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) 6137 return -EINVAL; 6138 6139 /* 6140 * KVM_STATE_NESTED_EVMCS used to signal that KVM should 6141 * enable eVMCS capability on vCPU. However, since then 6142 * code was changed such that the flag signals vmcs12 should 6143 * be copied into eVMCS in guest memory. 6144 * 6145 * To preserve backwards compatibility, allow user 6146 * to set this flag even when there is no VMXON region. 6147 */ 6148 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) 6149 return -EINVAL; 6150 } else { 6151 if (!nested_vmx_allowed(vcpu)) 6152 return -EINVAL; 6153 6154 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) 6155 return -EINVAL; 6156 } 6157 6158 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6159 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6160 return -EINVAL; 6161 6162 if (kvm_state->hdr.vmx.smm.flags & 6163 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) 6164 return -EINVAL; 6165 6166 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) 6167 return -EINVAL; 6168 6169 /* 6170 * SMM temporarily disables VMX, so we cannot be in guest mode, 6171 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags 6172 * must be zero. 6173 */ 6174 if (is_smm(vcpu) ?
6175 (kvm_state->flags & 6176 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING)) 6177 : kvm_state->hdr.vmx.smm.flags) 6178 return -EINVAL; 6179 6180 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 6181 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) 6182 return -EINVAL; 6183 6184 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && 6185 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) 6186 return -EINVAL; 6187 6188 vmx_leave_nested(vcpu); 6189 6190 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) 6191 return 0; 6192 6193 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; 6194 ret = enter_vmx_operation(vcpu); 6195 if (ret) 6196 return ret; 6197 6198 /* Empty 'VMXON' state is permitted if no VMCS loaded */ 6199 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) { 6200 /* See vmx_has_valid_vmcs12. */ 6201 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || 6202 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || 6203 (kvm_state->hdr.vmx.vmcs12_pa != -1ull)) 6204 return -EINVAL; 6205 else 6206 return 0; 6207 } 6208 6209 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) { 6210 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || 6211 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) 6212 return -EINVAL; 6213 6214 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); 6215 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { 6216 /* 6217 * nested_vmx_handle_enlightened_vmptrld() cannot be called 6218 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be 6219 * restored yet. EVMCS will be mapped from 6220 * nested_get_vmcs12_pages(). 6221 */ 6222 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 6223 } else { 6224 return -EINVAL; 6225 } 6226 6227 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { 6228 vmx->nested.smm.vmxon = true; 6229 vmx->nested.vmxon = false; 6230 6231 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) 6232 vmx->nested.smm.guest_mode = true; 6233 } 6234 6235 vmcs12 = get_vmcs12(vcpu); 6236 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12))) 6237 return -EFAULT; 6238 6239 if (vmcs12->hdr.revision_id != VMCS12_REVISION) 6240 return -EINVAL; 6241 6242 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 6243 return 0; 6244 6245 vmx->nested.nested_run_pending = 6246 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); 6247 6248 vmx->nested.mtf_pending = 6249 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); 6250 6251 ret = -EINVAL; 6252 if (nested_cpu_has_shadow_vmcs(vmcs12) && 6253 vmcs12->vmcs_link_pointer != -1ull) { 6254 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 6255 6256 if (kvm_state->size < 6257 sizeof(*kvm_state) + 6258 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) 6259 goto error_guest_mode; 6260 6261 if (copy_from_user(shadow_vmcs12, 6262 user_vmx_nested_state->shadow_vmcs12, 6263 sizeof(*shadow_vmcs12))) { 6264 ret = -EFAULT; 6265 goto error_guest_mode; 6266 } 6267 6268 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 6269 !shadow_vmcs12->hdr.shadow_vmcs) 6270 goto error_guest_mode; 6271 } 6272 6273 vmx->nested.has_preemption_timer_deadline = false; 6274 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { 6275 vmx->nested.has_preemption_timer_deadline = true; 6276 vmx->nested.preemption_timer_deadline = 6277 kvm_state->hdr.vmx.preemption_timer_deadline; 6278 } 6279 6280 if (nested_vmx_check_controls(vcpu, vmcs12) || 6281 
nested_vmx_check_host_state(vcpu, vmcs12) || 6282 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored)) 6283 goto error_guest_mode; 6284 6285 vmx->nested.dirty_vmcs12 = true; 6286 ret = nested_vmx_enter_non_root_mode(vcpu, false); 6287 if (ret) 6288 goto error_guest_mode; 6289 6290 return 0; 6291 6292 error_guest_mode: 6293 vmx->nested.nested_run_pending = 0; 6294 return ret; 6295 } 6296 6297 void nested_vmx_set_vmcs_shadowing_bitmap(void) 6298 { 6299 if (enable_shadow_vmcs) { 6300 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); 6301 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); 6302 } 6303 } 6304 6305 /* 6306 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be 6307 * returned for the various VMX controls MSRs when nested VMX is enabled. 6308 * The same values should also be used to verify that vmcs12 control fields are 6309 * valid during nested entry from L1 to L2. 6310 * Each of these control msrs has a low and high 32-bit half: A low bit is on 6311 * if the corresponding bit in the (32-bit) control field *must* be on, and a 6312 * bit in the high half is on if the corresponding bit in the control field 6313 * may be on. See also vmx_control_verify(). 6314 */ 6315 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) 6316 { 6317 /* 6318 * Note that as a general rule, the high half of the MSRs (bits in 6319 * the control fields which may be 1) should be initialized by the 6320 * intersection of the underlying hardware's MSR (i.e., features which 6321 * can be supported) and the list of features we want to expose - 6322 * because they are known to be properly supported in our code. 6323 * Also, usually, the low half of the MSRs (bits which must be 1) can 6324 * be set to 0, meaning that L1 may turn off any of these bits. The 6325 * reason is that if one of these bits is necessary, it will appear 6326 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control 6327 * fields of vmcs01 and vmcs02, will turn these bits off - and 6328 * nested_vmx_l1_wants_exit() will not pass related exits to L1. 6329 * These rules have exceptions below. 6330 */ 6331 6332 /* pin-based controls */ 6333 rdmsr(MSR_IA32_VMX_PINBASED_CTLS, 6334 msrs->pinbased_ctls_low, 6335 msrs->pinbased_ctls_high); 6336 msrs->pinbased_ctls_low |= 6337 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6338 msrs->pinbased_ctls_high &= 6339 PIN_BASED_EXT_INTR_MASK | 6340 PIN_BASED_NMI_EXITING | 6341 PIN_BASED_VIRTUAL_NMIS | 6342 (enable_apicv ? PIN_BASED_POSTED_INTR : 0); 6343 msrs->pinbased_ctls_high |= 6344 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6345 PIN_BASED_VMX_PREEMPTION_TIMER; 6346 6347 /* exit controls */ 6348 rdmsr(MSR_IA32_VMX_EXIT_CTLS, 6349 msrs->exit_ctls_low, 6350 msrs->exit_ctls_high); 6351 msrs->exit_ctls_low = 6352 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 6353 6354 msrs->exit_ctls_high &= 6355 #ifdef CONFIG_X86_64 6356 VM_EXIT_HOST_ADDR_SPACE_SIZE | 6357 #endif 6358 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT | 6359 VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 6360 msrs->exit_ctls_high |= 6361 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | 6362 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 6363 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; 6364 6365 /* We support free control of debug control saving. 
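That is, VM_EXIT_SAVE_DEBUG_CONTROLS is cleared from the required-1 (low) half below, so L1 may leave it off.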
*/ 6366 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; 6367 6368 /* entry controls */ 6369 rdmsr(MSR_IA32_VMX_ENTRY_CTLS, 6370 msrs->entry_ctls_low, 6371 msrs->entry_ctls_high); 6372 msrs->entry_ctls_low = 6373 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 6374 msrs->entry_ctls_high &= 6375 #ifdef CONFIG_X86_64 6376 VM_ENTRY_IA32E_MODE | 6377 #endif 6378 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS | 6379 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 6380 msrs->entry_ctls_high |= 6381 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); 6382 6383 /* We support free control of debug control loading. */ 6384 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; 6385 6386 /* cpu-based controls */ 6387 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, 6388 msrs->procbased_ctls_low, 6389 msrs->procbased_ctls_high); 6390 msrs->procbased_ctls_low = 6391 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6392 msrs->procbased_ctls_high &= 6393 CPU_BASED_INTR_WINDOW_EXITING | 6394 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING | 6395 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | 6396 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | 6397 CPU_BASED_CR3_STORE_EXITING | 6398 #ifdef CONFIG_X86_64 6399 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | 6400 #endif 6401 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | 6402 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | 6403 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | 6404 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | 6405 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 6406 /* 6407 * We can allow some features even when not supported by the 6408 * hardware. For example, L1 can specify an MSR bitmap - and we 6409 * can use it to avoid exits to L1 - even when L0 runs L2 6410 * without MSR bitmaps. 6411 */ 6412 msrs->procbased_ctls_high |= 6413 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6414 CPU_BASED_USE_MSR_BITMAPS; 6415 6416 /* We support free control of CR3 access interception. */ 6417 msrs->procbased_ctls_low &= 6418 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); 6419 6420 /* 6421 * secondary cpu-based controls. Do not include those that 6422 * depend on CPUID bits, they are added later by 6423 * vmx_vcpu_after_set_cpuid. 6424 */ 6425 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) 6426 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 6427 msrs->secondary_ctls_low, 6428 msrs->secondary_ctls_high); 6429 6430 msrs->secondary_ctls_low = 0; 6431 msrs->secondary_ctls_high &= 6432 SECONDARY_EXEC_DESC | 6433 SECONDARY_EXEC_ENABLE_RDTSCP | 6434 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 6435 SECONDARY_EXEC_WBINVD_EXITING | 6436 SECONDARY_EXEC_APIC_REGISTER_VIRT | 6437 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 6438 SECONDARY_EXEC_RDRAND_EXITING | 6439 SECONDARY_EXEC_ENABLE_INVPCID | 6440 SECONDARY_EXEC_RDSEED_EXITING | 6441 SECONDARY_EXEC_XSAVES; 6442 6443 /* 6444 * We can emulate "VMCS shadowing," even if the hardware 6445 * doesn't support it. 
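* Without hardware support, every VMREAD/VMWRITE from L1 exits to L0 and is emulated by handle_vmread()/handle_vmwrite(), so the control can be advertised to L1 unconditionally.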
6446 */ 6447 msrs->secondary_ctls_high |= 6448 SECONDARY_EXEC_SHADOW_VMCS; 6449 6450 if (enable_ept) { 6451 /* nested EPT: emulate EPT also to L1 */ 6452 msrs->secondary_ctls_high |= 6453 SECONDARY_EXEC_ENABLE_EPT; 6454 msrs->ept_caps = 6455 VMX_EPT_PAGE_WALK_4_BIT | 6456 VMX_EPT_PAGE_WALK_5_BIT | 6457 VMX_EPTP_WB_BIT | 6458 VMX_EPT_INVEPT_BIT | 6459 VMX_EPT_EXECUTE_ONLY_BIT; 6460 6461 msrs->ept_caps &= ept_caps; 6462 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | 6463 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | 6464 VMX_EPT_1GB_PAGE_BIT; 6465 if (enable_ept_ad_bits) { 6466 msrs->secondary_ctls_high |= 6467 SECONDARY_EXEC_ENABLE_PML; 6468 msrs->ept_caps |= VMX_EPT_AD_BIT; 6469 } 6470 } 6471 6472 if (cpu_has_vmx_vmfunc()) { 6473 msrs->secondary_ctls_high |= 6474 SECONDARY_EXEC_ENABLE_VMFUNC; 6475 /* 6476 * Advertise EPTP switching unconditionally 6477 * since we emulate it 6478 */ 6479 if (enable_ept) 6480 msrs->vmfunc_controls = 6481 VMX_VMFUNC_EPTP_SWITCHING; 6482 } 6483 6484 /* 6485 * Old versions of KVM use the single-context version without 6486 * checking for support, so declare that it is supported even 6487 * though it is treated as global context. The alternative is 6488 * not failing the single-context invvpid, and it is worse. 6489 */ 6490 if (enable_vpid) { 6491 msrs->secondary_ctls_high |= 6492 SECONDARY_EXEC_ENABLE_VPID; 6493 msrs->vpid_caps = VMX_VPID_INVVPID_BIT | 6494 VMX_VPID_EXTENT_SUPPORTED_MASK; 6495 } 6496 6497 if (enable_unrestricted_guest) 6498 msrs->secondary_ctls_high |= 6499 SECONDARY_EXEC_UNRESTRICTED_GUEST; 6500 6501 if (flexpriority_enabled) 6502 msrs->secondary_ctls_high |= 6503 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 6504 6505 /* miscellaneous data */ 6506 rdmsr(MSR_IA32_VMX_MISC, 6507 msrs->misc_low, 6508 msrs->misc_high); 6509 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA; 6510 msrs->misc_low |= 6511 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | 6512 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | 6513 VMX_MISC_ACTIVITY_HLT | 6514 VMX_MISC_ACTIVITY_WAIT_SIPI; 6515 msrs->misc_high = 0; 6516 6517 /* 6518 * This MSR reports some information about VMX support. We 6519 * should return information about the VMX we emulate for the 6520 * guest, and the VMCS structure we give it - not about the 6521 * VMX support of the underlying hardware. 6522 */ 6523 msrs->basic = 6524 VMCS12_REVISION | 6525 VMX_BASIC_TRUE_CTLS | 6526 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | 6527 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); 6528 6529 if (cpu_has_vmx_basic_inout()) 6530 msrs->basic |= VMX_BASIC_INOUT; 6531 6532 /* 6533 * These MSRs specify bits which the guest must keep fixed on 6534 * while L1 is in VMXON mode (in L1's root mode, or running an L2). 6535 * We picked the standard core2 setting. 6536 */ 6537 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) 6538 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE 6539 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; 6540 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; 6541 6542 /* These MSRs specify bits which the guest must keep fixed off. 
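That is, a bit that reads as 0 in IA32_VMX_CR0_FIXED1/CR4_FIXED1 must also be 0 in CR0/CR4 while in VMX operation.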
*/ 6543 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); 6544 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); 6545 6546 /* highest index: VMX_PREEMPTION_TIMER_VALUE */ 6547 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1; 6548 } 6549 6550 void nested_vmx_hardware_unsetup(void) 6551 { 6552 int i; 6553 6554 if (enable_shadow_vmcs) { 6555 for (i = 0; i < VMX_BITMAP_NR; i++) 6556 free_page((unsigned long)vmx_bitmap[i]); 6557 } 6558 } 6559 6560 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) 6561 { 6562 int i; 6563 6564 if (!cpu_has_vmx_shadow_vmcs()) 6565 enable_shadow_vmcs = 0; 6566 if (enable_shadow_vmcs) { 6567 for (i = 0; i < VMX_BITMAP_NR; i++) { 6568 /* 6569 * The vmx_bitmap is not tied to a VM and so should 6570 * not be charged to a memcg. 6571 */ 6572 vmx_bitmap[i] = (unsigned long *) 6573 __get_free_page(GFP_KERNEL); 6574 if (!vmx_bitmap[i]) { 6575 nested_vmx_hardware_unsetup(); 6576 return -ENOMEM; 6577 } 6578 } 6579 6580 init_vmcs_shadow_fields(); 6581 } 6582 6583 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear; 6584 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch; 6585 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld; 6586 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst; 6587 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread; 6588 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume; 6589 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite; 6590 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff; 6591 exit_handlers[EXIT_REASON_VMON] = handle_vmon; 6592 exit_handlers[EXIT_REASON_INVEPT] = handle_invept; 6593 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid; 6594 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc; 6595 6596 return 0; 6597 } 6598 6599 struct kvm_x86_nested_ops vmx_nested_ops = { 6600 .check_events = vmx_check_nested_events, 6601 .hv_timer_pending = nested_vmx_preemption_timer_pending, 6602 .get_state = vmx_get_nested_state, 6603 .set_state = vmx_set_nested_state, 6604 .get_nested_state_pages = vmx_get_nested_state_pages, 6605 .write_log_dirty = nested_vmx_write_pml_buffer, 6606 .enable_evmcs = nested_enable_evmcs, 6607 .get_evmcs_version = nested_get_evmcs_version, 6608 }; 6609