// SPDX-License-Identifier: GPL-2.0

#include <linux/frame.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "trace.h"
#include "x86.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap	(vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap	(vmx_bitmap[VMX_VMWRITE_BITMAP])

static u16 shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x) x,
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static u16 shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x) x,
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		u16 field = shadow_read_only_fields[i];

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
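		/*
		 * On 64-bit hosts the odd-numbered "HIGH" encoding of a
		 * 64-bit field is redundant: the full field is accessed
		 * directly, so drop it from the compacted array below.
		 */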
#ifdef CONFIG_X86_64
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_only_fields[j] = field;
		j++;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		u16 field = shadow_read_write_fields[i];

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
#ifdef CONFIG_X86_64
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_write_fields[j] = field;
		j++;
	}
	max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
		return nested_vmx_failInvalid(vcpu);

	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed
	 */
	return kvm_skip_emulated_instruction(vcpu);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: don't just reset the guest here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
}

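/*
 * Unmap and release the page backing the enlightened VMCS, if any, and
 * invalidate the cached pointers so that a stale eVMCS is never reused.
 */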
static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.hv_evmcs)
		return;

	kunmap(vmx->nested.hv_evmcs_page);
	kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
	vmx->nested.hv_evmcs_vmptr = -1ull;
	vmx->nested.hv_evmcs_page = NULL;
	vmx->nested.hv_evmcs = NULL;
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	kfree(vmx->nested.cached_shadow_vmcs12);
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_dirty(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	if (vmx->nested.virtual_apic_page) {
		kvm_release_page_dirty(vmx->nested.virtual_apic_page);
		vmx->nested.virtual_apic_page = NULL;
	}
	if (vmx->nested.pi_desc_page) {
		kunmap(vmx->nested.pi_desc_page);
		kvm_release_page_dirty(vmx->nested.pi_desc_page);
		vmx->nested.pi_desc_page = NULL;
		vmx->nested.pi_desc = NULL;
	}

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}

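/*
 * Switch the logical CPU's current VMCS to @vmcs while keeping the vCPU
 * loaded; the VM-entry/VM-exit control shadows and the segment cache are
 * reset because they describe the previously loaded VMCS.
 */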
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	vmx_vcpu_put(vcpu);
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load(vcpu, cpu);
	put_cpu();

	vm_entry_controls_reset_shadow(vmx);
	vm_exit_controls_reset_shadow(vmx);
	vmx_segment_cache_clear(vmx);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
	free_nested(vcpu);
	vcpu_put(vcpu);
}

static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else if (fault->error_code & PFERR_RSVD_MASK)
		exit_reason = EXIT_REASON_EPT_MISCONFIG;
	else
		exit_reason = EXIT_REASON_EPT_VIOLATION;

	nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_ept_mmu(vcpu,
				to_vmx(vcpu)->nested.msrs.ept_caps &
				VMX_EPT_EXECUTE_ONLY_BIT,
				nested_ept_ad_enabled(vcpu),
				nested_ept_get_cr3(vcpu));
	vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
	vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

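/*
 * Per the SDM, when the #PF bit is set in the exception bitmap a page
 * fault causes a VM-exit iff (error_code & PFEC_MASK) == PFEC_MATCH;
 * when the bit is clear, the condition is inverted.  The helper below
 * evaluates exactly that for a vmcs12.
 */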
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}


/*
 * KVM wants to inject page-faults which it received to the guest. This
 * function checks whether, in a nested guest, they need to be injected to
 * L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~(DR6_FIXED_1 | DR6_BT);
				payload ^= DR6_RTM;
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}


static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
	    !to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
	    !page_address_valid(vcpu, vmcs12->io_bitmap_b))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
		return -EINVAL;

	return 0;
}

/*
 * Check whether a write to the given MSR is intercepted by the L01
 * (vmcs01) MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}

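/*
 * MSR-bitmap layout: a single 4-KByte page holding four 1-KByte bitmaps,
 * where a set bit means the corresponding access is intercepted:
 *
 *	0x000: reads  of MSRs 0x00000000 - 0x00001fff ("read-low")
 *	0x400: reads  of MSRs 0xc0000000 - 0xc0001fff ("read-high")
 *	0x800: writes of MSRs 0x00000000 - 0x00001fff ("write-low")
 *	0xc00: writes of MSRs 0xc0000000 - 0xc0001fff ("write-high")
 */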
/*
 * If an MSR is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_nested,
						 u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
		    !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
			/* read-low */
			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);

		if (type & MSR_TYPE_W &&
		    !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
			/* write-low */
			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R &&
		    !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
			/* read-high */
			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);

		if (type & MSR_TYPE_W &&
		    !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
			/* write-high */
			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);

	}
}

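/*
 * Set both the read and the write intercept bits for the entire x2APIC
 * MSR range (0x800 - 0x8ff) in the given bitmap.
 */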
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}

/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	struct page *page;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	/*
	 * pred_cmd & spec_ctrl are trying to verify two things:
	 *
	 * 1. L0 gave permission to L1 to actually passthrough the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessary merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
	bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !pred_cmd && !spec_ctrl)
		return false;

	page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
	if (is_error_page(page))
		return false;

	msr_bitmap_l1 = (unsigned long *)kmap(page);

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively disable them below.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	if (spec_ctrl)
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			MSR_IA32_SPEC_CTRL,
			MSR_TYPE_R | MSR_TYPE_W);

	if (pred_cmd)
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			MSR_IA32_PRED_CMD,
			MSR_TYPE_W);

	kunmap(page);
	kvm_release_page_clean(page);

	return true;
}

static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vmcs12 *shadow;
	struct page *page;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);
	page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);

	memcpy(shadow, kmap(page), VMCS12_SIZE);

	kunmap(page);
	kvm_release_page_clean(page);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    !page_address_valid(vcpu, vmcs12->apic_access_addr))
		return -EINVAL;
	else
		return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (nested_cpu_has_vid(vmcs12) &&
	    !nested_exit_on_intr(vcpu))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has already been checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	    (!nested_cpu_has_vid(vmcs12) ||
	     !nested_exit_intr_ack_set(vcpu) ||
	     (vmcs12->posted_intr_nv & 0xff00) ||
	     (vmcs12->posted_intr_desc_addr & 0x3f) ||
	     (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return -EINVAL;

	return 0;
}

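/*
 * Validate one MSR switch area: the address must be 16-byte aligned and
 * the whole array of vmx_msr_entry structures must lie below the guest's
 * physical-address width.
 */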
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	int maxphyaddr;

	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
					vmcs12->vm_exit_msr_load_addr) ||
	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
					vmcs12->vm_exit_msr_store_addr))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						      struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
					vmcs12->vm_entry_msr_load_addr))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (!nested_cpu_has_ept(vmcs12) ||
	    !page_address_valid(vcpu, vmcs12->pml_address))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							  struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
	    !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
		return -EINVAL;

	return 0;
}

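/*
 * Checks common to entries in the VM-entry MSR-load and VM-exit MSR-store
 * areas: x2APIC MSRs, microcode MSRs and entries with reserved bits set
 * are rejected.
 */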
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
		return -EINVAL;
	if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
	    e->index == MSR_IA32_UCODE_REV)
		return -EINVAL;
	if (e->reserved != 0)
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (e->index == MSR_FS_BASE ||
	    e->index == MSR_GS_BASE ||
	    e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

/*
 * Load the guest's/host's MSRs at nested entry/exit.
 * Returns 0 on success, or the 1-based index of the failing entry on failure.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	struct msr_data msr;

	msr.host_initiated = false;
	for (i = 0; i < count; i++) {
		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		msr.index = e.index;
		msr.data = e.value;
		if (kvm_set_msr(vcpu, &msr)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	return i + 1;
}

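/*
 * Read each entry of an MSR-store area, fetch the current value of the
 * MSR from the vCPU and write it back into the guest-supplied table.
 */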
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;

	for (i = 0; i < count; i++) {
		struct msr_data msr_info;
		if (kvm_vcpu_read_guest(vcpu,
					gpa + i * sizeof(e),
					&e, 2 * sizeof(u32))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			return -EINVAL;
		}
		if (nested_vmx_store_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			return -EINVAL;
		}
		msr_info.host_initiated = false;
		msr_info.index = e.index;
		if (kvm_get_msr(vcpu, &msr_info)) {
			pr_debug_ratelimited(
				"%s cannot read MSR (%u, 0x%x)\n",
				__func__, i, e.index);
			return -EINVAL;
		}
		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
					 offsetof(struct vmx_msr_entry, value),
					 &msr_info.data, sizeof(msr_info.data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, msr_info.data);
			return -EINVAL;
		}
	}
	return 0;
}

static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long invalid_mask;

	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	return (val & invalid_mask) == 0;
}

/*
 * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
 * emulating VM entry into a guest with EPT enabled.
 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
 * is assigned to entry_failure_code on failure.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
			       u32 *entry_failure_code)
{
	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
		if (!nested_cr3_valid(vcpu, cr3)) {
			*entry_failure_code = ENTRY_FAIL_DEFAULT;
			return 1;
		}

		/*
		 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
		 * must not be dereferenced.
		 */
		if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
		    !nested_ept) {
			if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
				*entry_failure_code = ENTRY_FAIL_PDPTE;
				return 1;
			}
		}
	}

	if (!nested_ept)
		kvm_mmu_new_cr3(vcpu, cr3, false);

	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);

	kvm_init_mmu(vcpu, false);

	return 0;
}

/*
 * Returns true if KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated by L1.
 *
 * If L1 uses EPT, then TLB entries are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return nested_cpu_has_ept(vmcs12) ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}


static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}

static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}

static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	/*
	 * If L1 has read-only VM-exit information fields, use the
	 * less permissive vmx_vmwrite_bitmap to specify write
	 * permissions for the shadow VMCS.
	 */
	if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));

	return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicate bits that "must-be-1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}

/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}

/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case
 * they have been modified by the L1 guest. Note that the "read-only"
 * VM-exit information fields are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS."
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	const u16 *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	int i, q;
	unsigned long field;
	u64 field_value;
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			field_value = __vmcs_readl(field);
			vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
		}
		/*
		 * Skip the VM-exit information fields if they are read-only.
		 */
		if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
			break;
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}

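/*
 * Propagate the cached vmcs12 values into the shadow VMCS so that L1's
 * VMREADs of shadowed fields return up-to-date values without causing a
 * VM-exit.
 */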
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
	const u16 *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	int i, q;
	unsigned long field;
	u64 field_value = 0;
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
			__vmcs_writel(field, field_value);
		}
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);
}

static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
	vmcs12->tpr_threshold = evmcs->tpr_threshold;
	vmcs12->guest_rip = evmcs->guest_rip;

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
		vmcs12->guest_rsp = evmcs->guest_rsp;
		vmcs12->guest_rflags = evmcs->guest_rflags;
		vmcs12->guest_interruptibility_info =
			evmcs->guest_interruptibility_info;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->cpu_based_vm_exec_control =
			evmcs->cpu_based_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->exception_bitmap = evmcs->exception_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
		vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
		vmcs12->vm_entry_intr_info_field =
			evmcs->vm_entry_intr_info_field;
		vmcs12->vm_entry_exception_error_code =
			evmcs->vm_entry_exception_error_code;
		vmcs12->vm_entry_instruction_len =
			evmcs->vm_entry_instruction_len;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
		vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
		vmcs12->host_cr0 = evmcs->host_cr0;
		vmcs12->host_cr3 = evmcs->host_cr3;
		vmcs12->host_cr4 = evmcs->host_cr4;
		vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
		vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
		vmcs12->host_rip = evmcs->host_rip;
		vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
		vmcs12->host_es_selector = evmcs->host_es_selector;
		vmcs12->host_cs_selector = evmcs->host_cs_selector;
		vmcs12->host_ss_selector = evmcs->host_ss_selector;
		vmcs12->host_ds_selector = evmcs->host_ds_selector;
		vmcs12->host_fs_selector = evmcs->host_fs_selector;
		vmcs12->host_gs_selector = evmcs->host_gs_selector;
		vmcs12->host_tr_selector = evmcs->host_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
		vmcs12->secondary_vm_exec_control =
			evmcs->secondary_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
		vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
		vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
		vmcs12->msr_bitmap = evmcs->msr_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
		vmcs12->guest_es_base = evmcs->guest_es_base;
		vmcs12->guest_cs_base = evmcs->guest_cs_base;
		vmcs12->guest_ss_base = evmcs->guest_ss_base;
		vmcs12->guest_ds_base = evmcs->guest_ds_base;
		vmcs12->guest_fs_base = evmcs->guest_fs_base;
		vmcs12->guest_gs_base = evmcs->guest_gs_base;
		vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
		vmcs12->guest_tr_base = evmcs->guest_tr_base;
		vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
		vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
		vmcs12->guest_es_limit = evmcs->guest_es_limit;
		vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
		vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
		vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
		vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
		vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
		vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
		vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
		vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
		vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
		vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
		vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
		vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
		vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
		vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
		vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
		vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
		vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
		vmcs12->guest_es_selector = evmcs->guest_es_selector;
		vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
		vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
		vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
		vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
		vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
		vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
		vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
		vmcs12->tsc_offset = evmcs->tsc_offset;
		vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
		vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
		vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
		vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
		vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
		vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
		vmcs12->guest_cr0 = evmcs->guest_cr0;
		vmcs12->guest_cr3 = evmcs->guest_cr3;
		vmcs12->guest_cr4 = evmcs->guest_cr4;
		vmcs12->guest_dr7 = evmcs->guest_dr7;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
		vmcs12->host_fs_base = evmcs->host_fs_base;
		vmcs12->host_gs_base = evmcs->host_gs_base;
		vmcs12->host_tr_base = evmcs->host_tr_base;
		vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
		vmcs12->host_idtr_base = evmcs->host_idtr_base;
		vmcs12->host_rsp = evmcs->host_rsp;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
		vmcs12->ept_pointer = evmcs->ept_pointer;
		vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
		vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
		vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
		vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
		vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
		vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
		vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
		vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
		vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
		vmcs12->guest_pending_dbg_exceptions =
			evmcs->guest_pending_dbg_exceptions;
		vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
		vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
		vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
		vmcs12->guest_activity_state = evmcs->guest_activity_state;
		vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
	}

	/*
	 * Not used?
	 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
	 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
	 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
	 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
	 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
	 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
	 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
	 * vmcs12->page_fault_error_code_mask =
	 *		evmcs->page_fault_error_code_mask;
	 * vmcs12->page_fault_error_code_match =
	 *		evmcs->page_fault_error_code_match;
	 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
	 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
	 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
	 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
	 */

	/*
	 * Read only fields:
	 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
	 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
	 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
	 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
	 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
	 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
	 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
	 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
	 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
	 * vmcs12->exit_qualification = evmcs->exit_qualification;
	 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
	 *
	 * Not present in struct vmcs12:
	 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
	 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
	 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
	 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
	 */

	return 0;
}

static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/*
	 * Should not be changed by KVM:
	 *
	 * evmcs->host_es_selector = vmcs12->host_es_selector;
	 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
	 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
	 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
	 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
	 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
	 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
	 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
	 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
	 * evmcs->host_cr0 = vmcs12->host_cr0;
	 * evmcs->host_cr3 = vmcs12->host_cr3;
	 * evmcs->host_cr4 = vmcs12->host_cr4;
	 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
	 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
	 * evmcs->host_rip = vmcs12->host_rip;
	 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
	 * evmcs->host_fs_base = vmcs12->host_fs_base;
	 * evmcs->host_gs_base = vmcs12->host_gs_base;
	 * evmcs->host_tr_base = vmcs12->host_tr_base;
	 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
	 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
	 * evmcs->host_rsp = vmcs12->host_rsp;
	 * sync_vmcs12() doesn't read these:
	 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
	 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
	 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
	 * evmcs->ept_pointer = vmcs12->ept_pointer;
	 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
	 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
	 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
	 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
	 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
	 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
	 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
	 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
	 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
	 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
	 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
	 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
	 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
	 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
	 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
	 * evmcs->page_fault_error_code_mask =
	 *		vmcs12->page_fault_error_code_mask;
	 * evmcs->page_fault_error_code_match =
	 *		vmcs12->page_fault_error_code_match;
	 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
	 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
	 * evmcs->tsc_offset = vmcs12->tsc_offset;
	 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
	 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
	 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
	 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
	 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
	 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
	 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
	 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
	 *
	 * Not present in struct vmcs12:
	 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
	 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
	 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
	 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
	 */

	evmcs->guest_es_selector = vmcs12->guest_es_selector;
	evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
	evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
	evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
	evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
	evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
	evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
	evmcs->guest_tr_selector = vmcs12->guest_tr_selector;

	evmcs->guest_es_limit = vmcs12->guest_es_limit;
	evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
	evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
	evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
	evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
	evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
	evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
	evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
	evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
	evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;

	evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
	evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
	evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
	evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
	evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
	evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
	evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
	evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;

	evmcs->guest_es_base = vmcs12->guest_es_base;
	evmcs->guest_cs_base = vmcs12->guest_cs_base;
	evmcs->guest_ss_base = vmcs12->guest_ss_base;
	evmcs->guest_ds_base = vmcs12->guest_ds_base;
	evmcs->guest_fs_base = vmcs12->guest_fs_base;
	evmcs->guest_gs_base = vmcs12->guest_gs_base;
	evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
	evmcs->guest_tr_base = vmcs12->guest_tr_base;
	evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
	evmcs->guest_idtr_base = vmcs12->guest_idtr_base;

	evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
	evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;

	evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
	evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
	evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
	evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;

	evmcs->guest_pending_dbg_exceptions =
		vmcs12->guest_pending_dbg_exceptions;
	evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
	evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;

	evmcs->guest_activity_state = vmcs12->guest_activity_state;
	evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;

	evmcs->guest_cr0 = vmcs12->guest_cr0;
	evmcs->guest_cr3 = vmcs12->guest_cr3;
	evmcs->guest_cr4 = vmcs12->guest_cr4;
	evmcs->guest_dr7 = vmcs12->guest_dr7;

	evmcs->guest_physical_address = vmcs12->guest_physical_address;

	evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
	evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
	evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
vmcs12->vm_exit_intr_error_code; 1743 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; 1744 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; 1745 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; 1746 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; 1747 1748 evmcs->exit_qualification = vmcs12->exit_qualification; 1749 1750 evmcs->guest_linear_address = vmcs12->guest_linear_address; 1751 evmcs->guest_rsp = vmcs12->guest_rsp; 1752 evmcs->guest_rflags = vmcs12->guest_rflags; 1753 1754 evmcs->guest_interruptibility_info = 1755 vmcs12->guest_interruptibility_info; 1756 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; 1757 evmcs->vm_entry_controls = vmcs12->vm_entry_controls; 1758 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; 1759 evmcs->vm_entry_exception_error_code = 1760 vmcs12->vm_entry_exception_error_code; 1761 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; 1762 1763 evmcs->guest_rip = vmcs12->guest_rip; 1764 1765 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; 1766 1767 return 0; 1768 } 1769 1770 /* 1771 * This is an equivalent of the nested hypervisor executing the vmptrld 1772 * instruction. 1773 */ 1774 static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, 1775 bool from_launch) 1776 { 1777 struct vcpu_vmx *vmx = to_vmx(vcpu); 1778 struct hv_vp_assist_page assist_page; 1779 1780 if (likely(!vmx->nested.enlightened_vmcs_enabled)) 1781 return 1; 1782 1783 if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page))) 1784 return 1; 1785 1786 if (unlikely(!assist_page.enlighten_vmentry)) 1787 return 1; 1788 1789 if (unlikely(assist_page.current_nested_vmcs != 1790 vmx->nested.hv_evmcs_vmptr)) { 1791 1792 if (!vmx->nested.hv_evmcs) 1793 vmx->nested.current_vmptr = -1ull; 1794 1795 nested_release_evmcs(vcpu); 1796 1797 vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page( 1798 vcpu, assist_page.current_nested_vmcs); 1799 1800 if (unlikely(is_error_page(vmx->nested.hv_evmcs_page))) 1801 return 0; 1802 1803 vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page); 1804 1805 /* 1806 * Currently, KVM only supports eVMCS version 1 1807 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set the 1808 * first u32 field of the eVMCS to this value, which specifies the 1809 * eVMCS VersionNumber. 1810 * 1811 * The guest should learn the eVMCS versions supported by the host 1812 * by examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM is 1813 * expected to set this CPUID leaf according to the value 1814 * returned in vmcs_version from nested_enable_evmcs(). 1815 * 1816 * However, it turns out that Microsoft Hyper-V fails to comply 1817 * with its own invented interface: when Hyper-V uses eVMCS, it 1818 * just sets the first u32 field of the eVMCS to the revision_id 1819 * specified in MSR_IA32_VMX_BASIC, instead of the in-use eVMCS 1820 * version number, which is one of the supported versions specified 1821 * in CPUID.0x4000000A.EAX[0:15]. 1822 * 1823 * To work around this Hyper-V bug, we accept here either a supported 1824 * eVMCS version or the VMCS12 revision_id as valid values for the 1825 * first u32 field of the eVMCS. 1826 */ 1827 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && 1828 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { 1829 nested_release_evmcs(vcpu); 1830 return 0; 1831 } 1832 1833 vmx->nested.dirty_vmcs12 = true; 1834 /* 1835 * As we keep L2 state for one guest only, the 'hv_clean_fields' 1836 * mask can't be used when we switch between them.
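 * (A set bit in the mask tells KVM that L1 has not touched the
 * corresponding group of fields since the last VMLAUNCH/VMRESUME, so a
 * stale bit could make us skip copying state that did change.)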
Reset it here for 1837 * simplicity. 1838 */ 1839 vmx->nested.hv_evmcs->hv_clean_fields &= 1840 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 1841 vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs; 1842 1843 /* 1844 * Unlike normal vmcs12, enlightened vmcs12 is not fully 1845 * reloaded from guest's memory (read only fields, fields not 1846 * present in struct hv_enlightened_vmcs, ...). Make sure there 1847 * are no leftovers. 1848 */ 1849 if (from_launch) { 1850 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1851 memset(vmcs12, 0, sizeof(*vmcs12)); 1852 vmcs12->hdr.revision_id = VMCS12_REVISION; 1853 } 1854 1855 } 1856 return 1; 1857 } 1858 1859 void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu) 1860 { 1861 struct vcpu_vmx *vmx = to_vmx(vcpu); 1862 1863 /* 1864 * hv_evmcs may end up being not mapped after migration (when 1865 * L2 was running), map it here to make sure vmcs12 changes are 1866 * properly reflected. 1867 */ 1868 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) 1869 nested_vmx_handle_enlightened_vmptrld(vcpu, false); 1870 1871 if (vmx->nested.hv_evmcs) { 1872 copy_vmcs12_to_enlightened(vmx); 1873 /* All fields are clean */ 1874 vmx->nested.hv_evmcs->hv_clean_fields |= 1875 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 1876 } else { 1877 copy_vmcs12_to_shadow(vmx); 1878 } 1879 1880 vmx->nested.need_vmcs12_sync = false; 1881 } 1882 1883 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) 1884 { 1885 struct vcpu_vmx *vmx = 1886 container_of(timer, struct vcpu_vmx, nested.preemption_timer); 1887 1888 vmx->nested.preemption_timer_expired = true; 1889 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); 1890 kvm_vcpu_kick(&vmx->vcpu); 1891 1892 return HRTIMER_NORESTART; 1893 } 1894 1895 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) 1896 { 1897 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; 1898 struct vcpu_vmx *vmx = to_vmx(vcpu); 1899 1900 /* 1901 * A timer value of zero is architecturally guaranteed to cause 1902 * a VMExit prior to executing any instructions in the guest. 1903 */ 1904 if (preemption_timeout == 0) { 1905 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); 1906 return; 1907 } 1908 1909 if (vcpu->arch.virtual_tsc_khz == 0) 1910 return; 1911 1912 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 1913 preemption_timeout *= 1000000; 1914 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); 1915 hrtimer_start(&vmx->nested.preemption_timer, 1916 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); 1917 } 1918 1919 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 1920 { 1921 if (vmx->nested.nested_run_pending && 1922 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) 1923 return vmcs12->guest_ia32_efer; 1924 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) 1925 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); 1926 else 1927 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); 1928 } 1929 1930 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) 1931 { 1932 /* 1933 * If vmcs02 hasn't been initialized, set the constant vmcs02 state 1934 * according to L0's settings (vmcs12 is irrelevant here). Host 1935 * fields that come from L0 and are not constant, e.g. HOST_CR3, 1936 * will be set as needed prior to VMLAUNCH/VMRESUME. 
1937 */ 1938 if (vmx->nested.vmcs02_initialized) 1939 return; 1940 vmx->nested.vmcs02_initialized = true; 1941 1942 /* 1943 * We don't care what the EPTP value is we just need to guarantee 1944 * it's valid so we don't get a false positive when doing early 1945 * consistency checks. 1946 */ 1947 if (enable_ept && nested_early_check) 1948 vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0)); 1949 1950 /* All VMFUNCs are currently emulated through L0 vmexits. */ 1951 if (cpu_has_vmx_vmfunc()) 1952 vmcs_write64(VM_FUNCTION_CONTROL, 0); 1953 1954 if (cpu_has_vmx_posted_intr()) 1955 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); 1956 1957 if (cpu_has_vmx_msr_bitmap()) 1958 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); 1959 1960 if (enable_pml) 1961 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); 1962 1963 /* 1964 * Set the MSR load/store lists to match L0's settings. Only the 1965 * addresses are constant (for vmcs02), the counts can change based 1966 * on L2's behavior, e.g. switching to/from long mode. 1967 */ 1968 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); 1969 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 1970 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 1971 1972 vmx_set_constant_host_state(vmx); 1973 } 1974 1975 static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx, 1976 struct vmcs12 *vmcs12) 1977 { 1978 prepare_vmcs02_constant_state(vmx); 1979 1980 vmcs_write64(VMCS_LINK_POINTER, -1ull); 1981 1982 if (enable_vpid) { 1983 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) 1984 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); 1985 else 1986 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 1987 } 1988 } 1989 1990 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 1991 { 1992 u32 exec_control, vmcs12_exec_ctrl; 1993 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); 1994 1995 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) 1996 prepare_vmcs02_early_full(vmx, vmcs12); 1997 1998 /* 1999 * PIN CONTROLS 2000 */ 2001 exec_control = vmcs12->pin_based_vm_exec_control; 2002 2003 /* Preemption timer setting is computed directly in vmx_vcpu_run. */ 2004 exec_control |= vmcs_config.pin_based_exec_ctrl; 2005 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 2006 vmx->loaded_vmcs->hv_timer_armed = false; 2007 2008 /* Posted interrupts setting is only taken from vmcs12. */ 2009 if (nested_cpu_has_posted_intr(vmcs12)) { 2010 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; 2011 vmx->nested.pi_pending = false; 2012 } else { 2013 exec_control &= ~PIN_BASED_POSTED_INTR; 2014 } 2015 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); 2016 2017 /* 2018 * EXEC CONTROLS 2019 */ 2020 exec_control = vmx_exec_control(vmx); /* L0's desires */ 2021 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; 2022 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; 2023 exec_control &= ~CPU_BASED_TPR_SHADOW; 2024 exec_control |= vmcs12->cpu_based_vm_exec_control; 2025 2026 /* 2027 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if 2028 * nested_get_vmcs12_pages can't fix it up, the illegal value 2029 * will result in a VM entry failure. 
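 * (As an illustration of why -1ull works as the poison value: it is
 * neither 4KB aligned nor within the guest's physical-address width, so
 * hardware will reject it if it is still in place at VM entry.)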
2030 */ 2031 if (exec_control & CPU_BASED_TPR_SHADOW) { 2032 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); 2033 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 2034 } else { 2035 #ifdef CONFIG_X86_64 2036 exec_control |= CPU_BASED_CR8_LOAD_EXITING | 2037 CPU_BASED_CR8_STORE_EXITING; 2038 #endif 2039 } 2040 2041 /* 2042 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed 2043 * for I/O port accesses. 2044 */ 2045 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 2046 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 2047 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); 2048 2049 /* 2050 * SECONDARY EXEC CONTROLS 2051 */ 2052 if (cpu_has_secondary_exec_ctrls()) { 2053 exec_control = vmx->secondary_exec_control; 2054 2055 /* Take the following fields only from vmcs12 */ 2056 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2057 SECONDARY_EXEC_ENABLE_INVPCID | 2058 SECONDARY_EXEC_RDTSCP | 2059 SECONDARY_EXEC_XSAVES | 2060 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2061 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2062 SECONDARY_EXEC_ENABLE_VMFUNC); 2063 if (nested_cpu_has(vmcs12, 2064 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { 2065 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & 2066 ~SECONDARY_EXEC_ENABLE_PML; 2067 exec_control |= vmcs12_exec_ctrl; 2068 } 2069 2070 /* VMCS shadowing for L2 is emulated for now */ 2071 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 2072 2073 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 2074 vmcs_write16(GUEST_INTR_STATUS, 2075 vmcs12->guest_intr_status); 2076 2077 /* 2078 * Write an illegal value to APIC_ACCESS_ADDR. Later, 2079 * nested_get_vmcs12_pages will either fix it up or 2080 * remove the VM execution control. 2081 */ 2082 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) 2083 vmcs_write64(APIC_ACCESS_ADDR, -1ull); 2084 2085 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING) 2086 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); 2087 2088 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); 2089 } 2090 2091 /* 2092 * ENTRY CONTROLS 2093 * 2094 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE 2095 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate 2096 * on the related bits (if supported by the CPU) in the hope that 2097 * we can avoid VMWrites during vmx_set_efer(). 2098 */ 2099 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) & 2100 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; 2101 if (cpu_has_load_ia32_efer()) { 2102 if (guest_efer & EFER_LMA) 2103 exec_control |= VM_ENTRY_IA32E_MODE; 2104 if (guest_efer != host_efer) 2105 exec_control |= VM_ENTRY_LOAD_IA32_EFER; 2106 } 2107 vm_entry_controls_init(vmx, exec_control); 2108 2109 /* 2110 * EXIT CONTROLS 2111 * 2112 * L2->L1 exit controls are emulated - the hardware exit is to L0 so 2113 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER 2114 * bits may be modified by vmx_set_efer() in prepare_vmcs02(). 2115 */ 2116 exec_control = vmx_vmexit_ctrl(); 2117 if (cpu_has_load_ia32_efer() && guest_efer != host_efer) 2118 exec_control |= VM_EXIT_LOAD_IA32_EFER; 2119 vm_exit_controls_init(vmx, exec_control); 2120 2121 /* 2122 * Conceptually we want to copy the PML address and index from 2123 * vmcs01 here, and then back to vmcs01 on nested vmexit. But, 2124 * since we always flush the log on each vmexit and never change 2125 * the PML address (once set), this happens to be equivalent to 2126 * simply resetting the index in vmcs02. 
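 * (In other words, instead of conceptually doing
 *	vmcs02.GUEST_PML_INDEX = vmcs01.GUEST_PML_INDEX
 * on entry and the reverse on exit, it is enough to reset the index to
 * PML_ENTITY_NUM - 1, i.e. 511 with the usual 512-entry log, because the
 * log is known to be empty at this point.)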
2127 */ 2128 if (enable_pml) 2129 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 2130 2131 /* 2132 * Interrupt/Exception Fields 2133 */ 2134 if (vmx->nested.nested_run_pending) { 2135 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2136 vmcs12->vm_entry_intr_info_field); 2137 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 2138 vmcs12->vm_entry_exception_error_code); 2139 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2140 vmcs12->vm_entry_instruction_len); 2141 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 2142 vmcs12->guest_interruptibility_info); 2143 vmx->loaded_vmcs->nmi_known_unmasked = 2144 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); 2145 } else { 2146 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 2147 } 2148 } 2149 2150 static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2151 { 2152 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2153 2154 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2155 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2156 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 2157 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 2158 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); 2159 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); 2160 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); 2161 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); 2162 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); 2163 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); 2164 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); 2165 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); 2166 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); 2167 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); 2168 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); 2169 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); 2170 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); 2171 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); 2172 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); 2173 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); 2174 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); 2175 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); 2176 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); 2177 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); 2178 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); 2179 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); 2180 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); 2181 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); 2182 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); 2183 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); 2184 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); 2185 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); 2186 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); 2187 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); 2188 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); 2189 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); 2190 } 2191 2192 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2193 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { 2194 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 2195 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 2196 vmcs12->guest_pending_dbg_exceptions); 2197 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); 2198 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); 2199 2200 /* 2201 * L1 may access the L2's PDPTR, so save them to 
construct 2202 * vmcs12 2203 */ 2204 if (enable_ept) { 2205 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2206 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2207 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2208 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2209 } 2210 } 2211 2212 if (nested_cpu_has_xsaves(vmcs12)) 2213 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); 2214 2215 /* 2216 * Whether page-faults are trapped is determined by a combination of 2217 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. 2218 * If enable_ept, L0 doesn't care about page faults and we should 2219 * set all of these to L1's desires. However, if !enable_ept, L0 does 2220 * care about (at least some) page faults, and because it is not easy 2221 * (if at all possible?) to merge L0 and L1's desires, we simply ask 2222 * to exit on each and every L2 page fault. This is done by setting 2223 * MASK=MATCH=0 and (see below) EB.PF=1. 2224 * Note that below we don't need special code to set EB.PF beyond the 2225 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, 2226 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when 2227 * !enable_ept, EB.PF is 1, so the "or" will always be 1. 2228 */ 2229 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 2230 enable_ept ? vmcs12->page_fault_error_code_mask : 0); 2231 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 2232 enable_ept ? vmcs12->page_fault_error_code_match : 0); 2233 2234 if (cpu_has_vmx_apicv()) { 2235 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); 2236 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); 2237 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); 2238 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); 2239 } 2240 2241 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 2242 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 2243 2244 set_cr4_guest_host_mask(vmx); 2245 2246 if (kvm_mpx_supported()) { 2247 if (vmx->nested.nested_run_pending && 2248 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 2249 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); 2250 else 2251 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); 2252 } 2253 } 2254 2255 /* 2256 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested 2257 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it 2258 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 2259 * guest in a way that will both be appropriate to L1's requests, and our 2260 * needs. In addition to modifying the active vmcs (which is vmcs02), this 2261 * function also has additional necessary side-effects, like setting various 2262 * vcpu->arch fields. 2263 * Returns 0 on success, 1 on failure. Invalid state exit qualification code 2264 * is assigned to entry_failure_code on failure. 2265 */ 2266 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 2267 u32 *entry_failure_code) 2268 { 2269 struct vcpu_vmx *vmx = to_vmx(vcpu); 2270 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2271 2272 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) { 2273 prepare_vmcs02_full(vmx, vmcs12); 2274 vmx->nested.dirty_vmcs12 = false; 2275 } 2276 2277 /* 2278 * First, the fields that are shadowed. This must be kept in sync 2279 * with vmcs_shadow_fields.h. 
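 * (These are the fields, e.g. GUEST_CS_AR_BYTES below, that L1 may have
 * written via VMWRITE through the shadow VMCS without a VM exit; such
 * writes are not covered by dirty_vmcs12 tracking, so they are refreshed
 * on every nested entry rather than only in prepare_vmcs02_full().)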
2280 */ 2281 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2282 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2283 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); 2284 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); 2285 } 2286 2287 if (vmx->nested.nested_run_pending && 2288 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { 2289 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); 2290 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); 2291 } else { 2292 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); 2293 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); 2294 } 2295 vmx_set_rflags(vcpu, vmcs12->guest_rflags); 2296 2297 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the 2298 * bitwise-or of what L1 wants to trap for L2, and what we want to 2299 * trap. Note that CR0.TS also needs updating - we do this later. 2300 */ 2301 update_exception_bitmap(vcpu); 2302 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; 2303 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); 2304 2305 if (vmx->nested.nested_run_pending && 2306 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { 2307 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); 2308 vcpu->arch.pat = vmcs12->guest_ia32_pat; 2309 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 2310 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 2311 } 2312 2313 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 2314 2315 if (kvm_has_tsc_control) 2316 decache_tsc_multiplier(vmx); 2317 2318 if (enable_vpid) { 2319 /* 2320 * There is no direct mapping between vpid02 and vpid12; the 2321 * vpid02 is per-vCPU for L0 and reused, while the value of 2322 * vpid12 is changed with one INVVPID during nested vmentry. 2323 * The vpid12 is allocated by L1 for L2, so it will not 2324 * influence the global bitmap (for vpid01 and vpid02 allocation) 2325 * even if L1 spawns a lot of nested vCPUs. 2326 */ 2327 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) { 2328 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { 2329 vmx->nested.last_vpid = vmcs12->virtual_processor_id; 2330 __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false); 2331 } 2332 } else { 2333 /* 2334 * If L1 uses EPT, then L0 needs to execute INVEPT on 2335 * EPTP02 instead of EPTP01. Therefore, delay TLB 2336 * flush until vmcs02->eptp is fully updated by 2337 * KVM_REQ_LOAD_CR3. Note that this assumes 2338 * KVM_REQ_TLB_FLUSH is evaluated after 2339 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest(). 2340 */ 2341 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 2342 } 2343 } 2344 2345 if (nested_cpu_has_ept(vmcs12)) 2346 nested_ept_init_mmu_context(vcpu); 2347 else if (nested_cpu_has2(vmcs12, 2348 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 2349 vmx_flush_tlb(vcpu, true); 2350 2351 /* 2352 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those 2353 * bits which we consider mandatory enabled. 2354 * The CR0_READ_SHADOW is what L2 should have expected to read given 2355 * the specifications by L1; it's not enough to take 2356 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may 2357 * have more bits than L1 expected.
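 * (For reference, nested_read_cr0(vmcs12) used below expands to roughly
 *	(guest_cr0 & ~cr0_guest_host_mask) |
 *	(cr0_read_shadow & cr0_guest_host_mask)
 * i.e. bits L1 intercepts read back as L1's shadow value, and all other
 * bits as the real guest_cr0.)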
2358 */ 2359 vmx_set_cr0(vcpu, vmcs12->guest_cr0); 2360 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); 2361 2362 vmx_set_cr4(vcpu, vmcs12->guest_cr4); 2363 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); 2364 2365 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); 2366 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ 2367 vmx_set_efer(vcpu, vcpu->arch.efer); 2368 2369 /* 2370 * Guest state is invalid and unrestricted guest is disabled, 2371 * which means L1 attempted VMEntry to L2 with invalid state. 2372 * Fail the VMEntry. 2373 */ 2374 if (vmx->emulation_required) { 2375 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2376 return 1; 2377 } 2378 2379 /* Shadow page tables on either EPT or shadow page tables. */ 2380 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), 2381 entry_failure_code)) 2382 return 1; 2383 2384 if (!enable_ept) 2385 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; 2386 2387 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); 2388 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); 2389 return 0; 2390 } 2391 2392 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) 2393 { 2394 if (!nested_cpu_has_nmi_exiting(vmcs12) && 2395 nested_cpu_has_virtual_nmis(vmcs12)) 2396 return -EINVAL; 2397 2398 if (!nested_cpu_has_virtual_nmis(vmcs12) && 2399 nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING)) 2400 return -EINVAL; 2401 2402 return 0; 2403 } 2404 2405 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) 2406 { 2407 struct vcpu_vmx *vmx = to_vmx(vcpu); 2408 int maxphyaddr = cpuid_maxphyaddr(vcpu); 2409 2410 /* Check for memory type validity */ 2411 switch (address & VMX_EPTP_MT_MASK) { 2412 case VMX_EPTP_MT_UC: 2413 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)) 2414 return false; 2415 break; 2416 case VMX_EPTP_MT_WB: 2417 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)) 2418 return false; 2419 break; 2420 default: 2421 return false; 2422 } 2423 2424 /* only 4 levels page-walk length are valid */ 2425 if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4) 2426 return false; 2427 2428 /* Reserved bits should not be set */ 2429 if (address >> maxphyaddr || ((address >> 7) & 0x1f)) 2430 return false; 2431 2432 /* AD, if set, should be supported */ 2433 if (address & VMX_EPTP_AD_ENABLE_BIT) { 2434 if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)) 2435 return false; 2436 } 2437 2438 return true; 2439 } 2440 2441 /* 2442 * Checks related to VM-Execution Control Fields 2443 */ 2444 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, 2445 struct vmcs12 *vmcs12) 2446 { 2447 struct vcpu_vmx *vmx = to_vmx(vcpu); 2448 2449 if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control, 2450 vmx->nested.msrs.pinbased_ctls_low, 2451 vmx->nested.msrs.pinbased_ctls_high) || 2452 !vmx_control_verify(vmcs12->cpu_based_vm_exec_control, 2453 vmx->nested.msrs.procbased_ctls_low, 2454 vmx->nested.msrs.procbased_ctls_high)) 2455 return -EINVAL; 2456 2457 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && 2458 !vmx_control_verify(vmcs12->secondary_vm_exec_control, 2459 vmx->nested.msrs.secondary_ctls_low, 2460 vmx->nested.msrs.secondary_ctls_high)) 2461 return -EINVAL; 2462 2463 if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) || 2464 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || 2465 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || 2466 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || 2467 
nested_vmx_check_apic_access_controls(vcpu, vmcs12) || 2468 nested_vmx_check_apicv_controls(vcpu, vmcs12) || 2469 nested_vmx_check_nmi_controls(vmcs12) || 2470 nested_vmx_check_pml_controls(vcpu, vmcs12) || 2471 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || 2472 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || 2473 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || 2474 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2475 return -EINVAL; 2476 2477 if (!nested_cpu_has_preemption_timer(vmcs12) && 2478 nested_cpu_has_save_preemption_timer(vmcs12)) 2479 return -EINVAL; 2480 2481 if (nested_cpu_has_ept(vmcs12) && 2482 !valid_ept_address(vcpu, vmcs12->ept_pointer)) 2483 return -EINVAL; 2484 2485 if (nested_cpu_has_vmfunc(vmcs12)) { 2486 if (vmcs12->vm_function_control & 2487 ~vmx->nested.msrs.vmfunc_controls) 2488 return -EINVAL; 2489 2490 if (nested_cpu_has_eptp_switching(vmcs12)) { 2491 if (!nested_cpu_has_ept(vmcs12) || 2492 !page_address_valid(vcpu, vmcs12->eptp_list_address)) 2493 return -EINVAL; 2494 } 2495 } 2496 2497 return 0; 2498 } 2499 2500 /* 2501 * Checks related to VM-Exit Control Fields 2502 */ 2503 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, 2504 struct vmcs12 *vmcs12) 2505 { 2506 struct vcpu_vmx *vmx = to_vmx(vcpu); 2507 2508 if (!vmx_control_verify(vmcs12->vm_exit_controls, 2509 vmx->nested.msrs.exit_ctls_low, 2510 vmx->nested.msrs.exit_ctls_high) || 2511 nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)) 2512 return -EINVAL; 2513 2514 return 0; 2515 } 2516 2517 /* 2518 * Checks related to VM-Entry Control Fields 2519 */ 2520 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, 2521 struct vmcs12 *vmcs12) 2522 { 2523 struct vcpu_vmx *vmx = to_vmx(vcpu); 2524 2525 if (!vmx_control_verify(vmcs12->vm_entry_controls, 2526 vmx->nested.msrs.entry_ctls_low, 2527 vmx->nested.msrs.entry_ctls_high)) 2528 return -EINVAL; 2529 2530 /* 2531 * From the Intel SDM, volume 3: 2532 * Fields relevant to VM-entry event injection must be set properly. 2533 * These fields are the VM-entry interruption-information field, the 2534 * VM-entry exception error code, and the VM-entry instruction length. 
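 * (Illustration of a well-formed encoding: injecting a page fault with
 * an error code would use
 *	vm_entry_intr_info_field = PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
 *				   INTR_INFO_DELIVER_CODE_MASK |
 *				   INTR_INFO_VALID_MASK;
 * with the error code in the VM-entry exception error code field; the
 * instruction length is only relevant for software events.)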
2535 */ 2536 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { 2537 u32 intr_info = vmcs12->vm_entry_intr_info_field; 2538 u8 vector = intr_info & INTR_INFO_VECTOR_MASK; 2539 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; 2540 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; 2541 bool should_have_error_code; 2542 bool urg = nested_cpu_has2(vmcs12, 2543 SECONDARY_EXEC_UNRESTRICTED_GUEST); 2544 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; 2545 2546 /* VM-entry interruption-info field: interruption type */ 2547 if (intr_type == INTR_TYPE_RESERVED || 2548 (intr_type == INTR_TYPE_OTHER_EVENT && 2549 !nested_cpu_supports_monitor_trap_flag(vcpu))) 2550 return -EINVAL; 2551 2552 /* VM-entry interruption-info field: vector */ 2553 if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || 2554 (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || 2555 (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) 2556 return -EINVAL; 2557 2558 /* VM-entry interruption-info field: deliver error code */ 2559 should_have_error_code = 2560 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && 2561 x86_exception_has_error_code(vector); 2562 if (has_error_code != should_have_error_code) 2563 return -EINVAL; 2564 2565 /* VM-entry exception error code */ 2566 if (has_error_code && 2567 vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)) 2568 return -EINVAL; 2569 2570 /* VM-entry interruption-info field: reserved bits */ 2571 if (intr_info & INTR_INFO_RESVD_BITS_MASK) 2572 return -EINVAL; 2573 2574 /* VM-entry instruction length */ 2575 switch (intr_type) { 2576 case INTR_TYPE_SOFT_EXCEPTION: 2577 case INTR_TYPE_SOFT_INTR: 2578 case INTR_TYPE_PRIV_SW_EXCEPTION: 2579 if ((vmcs12->vm_entry_instruction_len > 15) || 2580 (vmcs12->vm_entry_instruction_len == 0 && 2581 !nested_cpu_has_zero_length_injection(vcpu))) 2582 return -EINVAL; 2583 } 2584 } 2585 2586 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) 2587 return -EINVAL; 2588 2589 return 0; 2590 } 2591 2592 /* 2593 * Checks related to Host Control Registers and MSRs 2594 */ 2595 static int nested_check_host_control_regs(struct kvm_vcpu *vcpu, 2596 struct vmcs12 *vmcs12) 2597 { 2598 bool ia32e; 2599 2600 if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) || 2601 !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) || 2602 !nested_cr3_valid(vcpu, vmcs12->host_cr3)) 2603 return -EINVAL; 2604 2605 if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) || 2606 is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)) 2607 return -EINVAL; 2608 2609 /* 2610 * If the load IA32_EFER VM-exit control is 1, bits reserved in the 2611 * IA32_EFER MSR must be 0 in the field for that register. In addition, 2612 * the values of the LMA and LME bits in the field must each be that of 2613 * the host address-space size VM-exit control. 
2614 */ 2615 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { 2616 ia32e = (vmcs12->vm_exit_controls & 2617 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; 2618 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || 2619 ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || 2620 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) 2621 return -EINVAL; 2622 } 2623 2624 return 0; 2625 } 2626 2627 /* 2628 * Checks related to Guest Non-register State 2629 */ 2630 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) 2631 { 2632 if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && 2633 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) 2634 return -EINVAL; 2635 2636 return 0; 2637 } 2638 2639 static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu, 2640 struct vmcs12 *vmcs12) 2641 { 2642 if (nested_check_vm_execution_controls(vcpu, vmcs12) || 2643 nested_check_vm_exit_controls(vcpu, vmcs12) || 2644 nested_check_vm_entry_controls(vcpu, vmcs12)) 2645 return VMXERR_ENTRY_INVALID_CONTROL_FIELD; 2646 2647 if (nested_check_host_control_regs(vcpu, vmcs12)) 2648 return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; 2649 2650 if (nested_check_guest_non_reg_state(vmcs12)) 2651 return VMXERR_ENTRY_INVALID_CONTROL_FIELD; 2652 2653 return 0; 2654 } 2655 2656 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, 2657 struct vmcs12 *vmcs12) 2658 { 2659 int r; 2660 struct page *page; 2661 struct vmcs12 *shadow; 2662 2663 if (vmcs12->vmcs_link_pointer == -1ull) 2664 return 0; 2665 2666 if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)) 2667 return -EINVAL; 2668 2669 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer); 2670 if (is_error_page(page)) 2671 return -EINVAL; 2672 2673 r = 0; 2674 shadow = kmap(page); 2675 if (shadow->hdr.revision_id != VMCS12_REVISION || 2676 shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)) 2677 r = -EINVAL; 2678 kunmap(page); 2679 kvm_release_page_clean(page); 2680 return r; 2681 } 2682 2683 static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu, 2684 struct vmcs12 *vmcs12, 2685 u32 *exit_qual) 2686 { 2687 bool ia32e; 2688 2689 *exit_qual = ENTRY_FAIL_DEFAULT; 2690 2691 if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) || 2692 !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) 2693 return 1; 2694 2695 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { 2696 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR; 2697 return 1; 2698 } 2699 2700 /* 2701 * If the load IA32_EFER VM-entry control is 1, the following checks 2702 * are performed on the field for the IA32_EFER MSR: 2703 * - Bits reserved in the IA32_EFER MSR must be 0. 2704 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of 2705 * the IA-32e mode guest VM-exit control. It must also be identical 2706 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to 2707 * CR0.PG) is 1. 
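 * (Concretely: with VM_ENTRY_IA32E_MODE set, guest_ia32_efer must have
 * LMA = 1, and when guest_cr0.PG is also set LME must equal LMA; any
 * other combination fails the check below.)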
2708 */ 2709 if (to_vmx(vcpu)->nested.nested_run_pending && 2710 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { 2711 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; 2712 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || 2713 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) || 2714 ((vmcs12->guest_cr0 & X86_CR0_PG) && 2715 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) 2716 return 1; 2717 } 2718 2719 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && 2720 (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) || 2721 (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))) 2722 return 1; 2723 2724 return 0; 2725 } 2726 2727 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) 2728 { 2729 struct vcpu_vmx *vmx = to_vmx(vcpu); 2730 unsigned long cr3, cr4; 2731 bool vm_fail; 2732 2733 if (!nested_early_check) 2734 return 0; 2735 2736 if (vmx->msr_autoload.host.nr) 2737 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 2738 if (vmx->msr_autoload.guest.nr) 2739 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 2740 2741 preempt_disable(); 2742 2743 vmx_prepare_switch_to_guest(vcpu); 2744 2745 /* 2746 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, 2747 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to 2748 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. 2749 * there is no need to preserve other bits or save/restore the field. 2750 */ 2751 vmcs_writel(GUEST_RFLAGS, 0); 2752 2753 cr3 = __get_current_cr3_fast(); 2754 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 2755 vmcs_writel(HOST_CR3, cr3); 2756 vmx->loaded_vmcs->host_state.cr3 = cr3; 2757 } 2758 2759 cr4 = cr4_read_shadow(); 2760 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 2761 vmcs_writel(HOST_CR4, cr4); 2762 vmx->loaded_vmcs->host_state.cr4 = cr4; 2763 } 2764 2765 asm( 2766 "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */ 2767 "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" 2768 "je 1f \n\t" 2769 __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t" 2770 "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" 2771 "1: \n\t" 2772 "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */ 2773 2774 /* Check if vmlaunch or vmresume is needed */ 2775 "cmpb $0, %c[launched](%[loaded_vmcs])\n\t" 2776 2777 /* 2778 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set 2779 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail 2780 * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the 2781 * result of VM-Enter is captured via CC_{SET,OUT} to vm_fail. 2782 */ 2783 "call vmx_vmenter\n\t" 2784 2785 CC_SET(be) 2786 : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail) 2787 : [HOST_RSP]"r"((unsigned long)HOST_RSP), 2788 [loaded_vmcs]"r"(vmx->loaded_vmcs), 2789 [launched]"i"(offsetof(struct loaded_vmcs, launched)), 2790 [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)), 2791 [wordsize]"i"(sizeof(ulong)) 2792 : "cc", "memory" 2793 ); 2794 2795 preempt_enable(); 2796 2797 if (vmx->msr_autoload.host.nr) 2798 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 2799 if (vmx->msr_autoload.guest.nr) 2800 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 2801 2802 if (vm_fail) { 2803 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 2804 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 2805 return 1; 2806 } 2807 2808 /* 2809 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
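 * (This is why interrupts are unconditionally re-enabled and DR7 is
 * reloaded from the per-cpu copy right below, even though no guest code
 * has actually run.)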
2810 */ 2811 local_irq_enable(); 2812 if (hw_breakpoint_active()) 2813 set_debugreg(__this_cpu_read(cpu_dr7), 7); 2814 2815 /* 2816 * A non-failing VMEntry means we somehow entered guest mode with 2817 * an illegal RIP, and that's just the tip of the iceberg. There 2818 * is no telling what memory has been modified or what state has 2819 * been exposed to unknown code. Hitting this all but guarantees 2820 * a (very critical) hardware issue. 2821 */ 2822 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & 2823 VMX_EXIT_REASONS_FAILED_VMENTRY)); 2824 2825 return 0; 2826 } 2827 2828 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, 2829 struct vmcs12 *vmcs12); 2830 2831 static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) 2832 { 2833 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2834 struct vcpu_vmx *vmx = to_vmx(vcpu); 2835 struct page *page; 2836 u64 hpa; 2837 2838 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 2839 /* 2840 * Translate L1 physical address to host physical 2841 * address for vmcs02. Keep the page pinned, so this 2842 * physical address remains valid. We keep a reference 2843 * to it so we can release it later. 2844 */ 2845 if (vmx->nested.apic_access_page) { /* shouldn't happen */ 2846 kvm_release_page_dirty(vmx->nested.apic_access_page); 2847 vmx->nested.apic_access_page = NULL; 2848 } 2849 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); 2850 /* 2851 * If translation failed, no matter: This feature asks 2852 * to exit when accessing the given address, and if it 2853 * can never be accessed, this feature won't do 2854 * anything anyway. 2855 */ 2856 if (!is_error_page(page)) { 2857 vmx->nested.apic_access_page = page; 2858 hpa = page_to_phys(vmx->nested.apic_access_page); 2859 vmcs_write64(APIC_ACCESS_ADDR, hpa); 2860 } else { 2861 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, 2862 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); 2863 } 2864 } 2865 2866 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 2867 if (vmx->nested.virtual_apic_page) { /* shouldn't happen */ 2868 kvm_release_page_dirty(vmx->nested.virtual_apic_page); 2869 vmx->nested.virtual_apic_page = NULL; 2870 } 2871 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr); 2872 2873 /* 2874 * If translation failed, VM entry will fail because 2875 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull. 2876 */ 2877 if (!is_error_page(page)) { 2878 vmx->nested.virtual_apic_page = page; 2879 hpa = page_to_phys(vmx->nested.virtual_apic_page); 2880 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa); 2881 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) && 2882 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) && 2883 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 2884 /* 2885 * The processor will never use the TPR shadow, simply 2886 * clear the bit from the execution control. Such a 2887 * configuration is useless, but it happens in tests. 2888 * For any other configuration, failing the vm entry is 2889 * _not_ what the processor does but it's basically the 2890 * only possibility we have. 
2891 */ 2892 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, 2893 CPU_BASED_TPR_SHADOW); 2894 } else { 2895 printk("bad virtual-APIC page address\n"); 2896 dump_vmcs(); 2897 } 2898 } 2899 2900 if (nested_cpu_has_posted_intr(vmcs12)) { 2901 if (vmx->nested.pi_desc_page) { /* shouldn't happen */ 2902 kunmap(vmx->nested.pi_desc_page); 2903 kvm_release_page_dirty(vmx->nested.pi_desc_page); 2904 vmx->nested.pi_desc_page = NULL; 2905 vmx->nested.pi_desc = NULL; 2906 vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull); 2907 } 2908 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr); 2909 if (is_error_page(page)) 2910 return; 2911 vmx->nested.pi_desc_page = page; 2912 vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page); 2913 vmx->nested.pi_desc = 2914 (struct pi_desc *)((void *)vmx->nested.pi_desc + 2915 (unsigned long)(vmcs12->posted_intr_desc_addr & 2916 (PAGE_SIZE - 1))); 2917 vmcs_write64(POSTED_INTR_DESC_ADDR, 2918 page_to_phys(vmx->nested.pi_desc_page) + 2919 (unsigned long)(vmcs12->posted_intr_desc_addr & 2920 (PAGE_SIZE - 1))); 2921 } 2922 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) 2923 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, 2924 CPU_BASED_USE_MSR_BITMAPS); 2925 else 2926 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, 2927 CPU_BASED_USE_MSR_BITMAPS); 2928 } 2929 2930 /* 2931 * Intel's VMX Instruction Reference specifies a common set of prerequisites 2932 * for running VMX instructions (except VMXON, whose prerequisites are 2933 * slightly different). It also specifies what exception to inject otherwise. 2934 * Note that many of these exceptions have priority over VM exits, so they 2935 * don't have to be checked again here. 2936 */ 2937 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) 2938 { 2939 if (!to_vmx(vcpu)->nested.vmxon) { 2940 kvm_queue_exception(vcpu, UD_VECTOR); 2941 return 0; 2942 } 2943 2944 if (vmx_get_cpl(vcpu)) { 2945 kvm_inject_gp(vcpu, 0); 2946 return 0; 2947 } 2948 2949 return 1; 2950 } 2951 2952 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) 2953 { 2954 u8 rvi = vmx_get_rvi(); 2955 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI); 2956 2957 return ((rvi & 0xf0) > (vppr & 0xf0)); 2958 } 2959 2960 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 2961 struct vmcs12 *vmcs12); 2962 2963 /* 2964 * If from_vmentry is false, this is being called from state restore (either RSM 2965 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. 2966 * 2967 * Returns: 2968 * 0 - success, i.e. proceed with actual VMEnter 2969 * 1 - consistency check VMExit 2970 * -1 - consistency check VMFail 2971 */ 2972 int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) 2973 { 2974 struct vcpu_vmx *vmx = to_vmx(vcpu); 2975 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2976 bool evaluate_pending_interrupts; 2977 u32 exit_reason = EXIT_REASON_INVALID_STATE; 2978 u32 exit_qual; 2979 2980 evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & 2981 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING); 2982 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) 2983 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); 2984 2985 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 2986 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 2987 if (kvm_mpx_supported() && 2988 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 2989 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 2990 2991 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); 2992 2993 prepare_vmcs02_early(vmx, vmcs12); 2994 2995 if (from_vmentry) { 2996 nested_get_vmcs12_pages(vcpu); 2997 2998 if (nested_vmx_check_vmentry_hw(vcpu)) { 2999 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3000 return -1; 3001 } 3002 3003 if (nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) 3004 goto vmentry_fail_vmexit; 3005 } 3006 3007 enter_guest_mode(vcpu); 3008 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 3009 vcpu->arch.tsc_offset += vmcs12->tsc_offset; 3010 3011 if (prepare_vmcs02(vcpu, vmcs12, &exit_qual)) 3012 goto vmentry_fail_vmexit_guest_mode; 3013 3014 if (from_vmentry) { 3015 exit_reason = EXIT_REASON_MSR_LOAD_FAIL; 3016 exit_qual = nested_vmx_load_msr(vcpu, 3017 vmcs12->vm_entry_msr_load_addr, 3018 vmcs12->vm_entry_msr_load_count); 3019 if (exit_qual) 3020 goto vmentry_fail_vmexit_guest_mode; 3021 } else { 3022 /* 3023 * The MMU is not initialized to point at the right entities yet and 3024 * "get pages" would need to read data from the guest (i.e. we will 3025 * need to perform gpa to hpa translation). Request a call 3026 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs 3027 * have already been set at vmentry time and should not be reset. 3028 */ 3029 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu); 3030 } 3031 3032 /* 3033 * If L1 had a pending IRQ/NMI when it executed 3034 * VMLAUNCH/VMRESUME that wasn't delivered because it was 3035 * disallowed (e.g. interrupts disabled), L0 needs to 3036 * evaluate whether this pending event should cause an exit from L2 3037 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't 3038 * intercept EXTERNAL_INTERRUPT). 3039 * 3040 * Usually this would be handled by the processor noticing an 3041 * IRQ/NMI window request, or checking RVI during evaluation of 3042 * pending virtual interrupts. However, this setting was done 3043 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0 3044 * to perform pending event evaluation by requesting a KVM_REQ_EVENT. 3045 */ 3046 if (unlikely(evaluate_pending_interrupts)) 3047 kvm_make_request(KVM_REQ_EVENT, vcpu); 3048 3049 /* 3050 * Do not start the preemption timer hrtimer until after we know 3051 * we are successful, so that only nested_vmx_vmexit needs to cancel 3052 * the timer. 3053 */ 3054 vmx->nested.preemption_timer_expired = false; 3055 if (nested_cpu_has_preemption_timer(vmcs12)) 3056 vmx_start_preemption_timer(vcpu); 3057 3058 /* 3059 * Note no nested_vmx_succeed or nested_vmx_fail here.
At this point 3060 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet 3061 * returned as far as L1 is concerned. It will only return (and set 3062 * the success flag) when L2 exits (see nested_vmx_vmexit()). 3063 */ 3064 return 0; 3065 3066 /* 3067 * A failed consistency check that leads to a VMExit during L1's 3068 * VMEnter to L2 is a variation of a normal VMexit, as explained in 3069 * 26.7 "VM-entry failures during or after loading guest state". 3070 */ 3071 vmentry_fail_vmexit_guest_mode: 3072 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 3073 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3074 leave_guest_mode(vcpu); 3075 3076 vmentry_fail_vmexit: 3077 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3078 3079 if (!from_vmentry) 3080 return 1; 3081 3082 load_vmcs12_host_state(vcpu, vmcs12); 3083 vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; 3084 vmcs12->exit_qualification = exit_qual; 3085 if (enable_shadow_vmcs || vmx->nested.hv_evmcs) 3086 vmx->nested.need_vmcs12_sync = true; 3087 return 1; 3088 } 3089 3090 /* 3091 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 3092 * for running an L2 nested guest. 3093 */ 3094 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) 3095 { 3096 struct vmcs12 *vmcs12; 3097 struct vcpu_vmx *vmx = to_vmx(vcpu); 3098 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); 3099 int ret; 3100 3101 if (!nested_vmx_check_permission(vcpu)) 3102 return 1; 3103 3104 if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true)) 3105 return 1; 3106 3107 if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull) 3108 return nested_vmx_failInvalid(vcpu); 3109 3110 vmcs12 = get_vmcs12(vcpu); 3111 3112 /* 3113 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact 3114 * that there *is* a valid VMCS pointer, RFLAGS.CF is set 3115 * rather than RFLAGS.ZF, and no error number is stored to the 3116 * VM-instruction error field. 3117 */ 3118 if (vmcs12->hdr.shadow_vmcs) 3119 return nested_vmx_failInvalid(vcpu); 3120 3121 if (vmx->nested.hv_evmcs) { 3122 copy_enlightened_to_vmcs12(vmx); 3123 /* Enlightened VMCS doesn't have launch state */ 3124 vmcs12->launch_state = !launch; 3125 } else if (enable_shadow_vmcs) { 3126 copy_shadow_to_vmcs12(vmx); 3127 } 3128 3129 /* 3130 * The nested entry process starts with enforcing various prerequisites 3131 * on vmcs12 as required by the Intel SDM, and act appropriately when 3132 * they fail: As the SDM explains, some conditions should cause the 3133 * instruction to fail, while others will cause the instruction to seem 3134 * to succeed, but return an EXIT_REASON_INVALID_STATE. 3135 * To speed up the normal (success) code path, we should avoid checking 3136 * for misconfigurations which will anyway be caught by the processor 3137 * when using the merged vmcs02. 3138 */ 3139 if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) 3140 return nested_vmx_failValid(vcpu, 3141 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); 3142 3143 if (vmcs12->launch_state == launch) 3144 return nested_vmx_failValid(vcpu, 3145 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS 3146 : VMXERR_VMRESUME_NONLAUNCHED_VMCS); 3147 3148 ret = nested_vmx_check_vmentry_prereqs(vcpu, vmcs12); 3149 if (ret) 3150 return nested_vmx_failValid(vcpu, ret); 3151 3152 /* 3153 * We're finally done with prerequisite checking, and can start with 3154 * the nested entry. 
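 * (nested_run_pending, set just below, is what tells the rest of KVM that
 * a VMLAUNCH/VMRESUME is in flight: prepare_vmcs02() then honors the
 * VM_ENTRY_LOAD_* controls when loading debug controls, PAT, BNDCFGS and
 * EFER from vmcs12, and vmx_check_nested_events() holds off injection
 * until the entry has completed.)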
3155 */ 3156 vmx->nested.nested_run_pending = 1; 3157 ret = nested_vmx_enter_non_root_mode(vcpu, true); 3158 vmx->nested.nested_run_pending = !ret; 3159 if (ret > 0) 3160 return 1; 3161 else if (ret) 3162 return nested_vmx_failValid(vcpu, 3163 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3164 3165 /* Hide L1D cache contents from the nested guest. */ 3166 vmx->vcpu.arch.l1tf_flush_l1d = true; 3167 3168 /* 3169 * Must happen outside of nested_vmx_enter_non_root_mode() as it will 3170 * also be used as part of restoring nVMX state for 3171 * snapshot restore (migration). 3172 * 3173 * In this flow, it is assumed that the vmcs12 cache was 3174 * transferred as part of captured nVMX state and should 3175 * therefore not be read from guest memory (which may not 3176 * exist on destination host yet). 3177 */ 3178 nested_cache_shadow_vmcs12(vcpu, vmcs12); 3179 3180 /* 3181 * If we're entering a halted L2 vcpu and the L2 vcpu won't be 3182 * awakened by event injection or by an NMI-window VM-exit or 3183 * by an interrupt-window VM-exit, halt the vcpu. 3184 */ 3185 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && 3186 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && 3187 !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) && 3188 !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) && 3189 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { 3190 vmx->nested.nested_run_pending = 0; 3191 return kvm_vcpu_halt(vcpu); 3192 } 3193 return 1; 3194 } 3195 3196 /* 3197 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date 3198 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). 3199 * This function returns the new value we should put in vmcs12.guest_cr0. 3200 * It's not enough to just return the vmcs02 GUEST_CR0. Rather, 3201 * 1. Bits that neither L0 nor L1 trapped were set directly by L2 and are now 3202 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 3203 * didn't trap the bit, because if L1 did, so would L0). 3204 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have 3205 * been modified by L2, and L1 knows it. So just leave the old value of 3206 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 3207 * isn't relevant, because if L0 traps this bit it can set it to anything. 3208 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have 3209 * changed these bits, and therefore they need to be updated, but L0 3210 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather 3211 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
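 * (Reading the three cases concretely: for a bit L1 traps, case 2 keeps
 * whatever vmcs12.guest_cr0 already holds; for a bit neither L0 nor L1
 * traps, case 1 takes the live value from vmcs02 GUEST_CR0; for a bit
 * only L0 traps, the value L2 believes it wrote is kept by L0 in vmcs02
 * CR0_READ_SHADOW, which is what case 3 picks up.)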
3212 */ 3213 static inline unsigned long 3214 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3215 { 3216 return 3217 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | 3218 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | 3219 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | 3220 vcpu->arch.cr0_guest_owned_bits)); 3221 } 3222 3223 static inline unsigned long 3224 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3225 { 3226 return 3227 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | 3228 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | 3229 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | 3230 vcpu->arch.cr4_guest_owned_bits)); 3231 } 3232 3233 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, 3234 struct vmcs12 *vmcs12) 3235 { 3236 u32 idt_vectoring; 3237 unsigned int nr; 3238 3239 if (vcpu->arch.exception.injected) { 3240 nr = vcpu->arch.exception.nr; 3241 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3242 3243 if (kvm_exception_is_soft(nr)) { 3244 vmcs12->vm_exit_instruction_len = 3245 vcpu->arch.event_exit_inst_len; 3246 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; 3247 } else 3248 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; 3249 3250 if (vcpu->arch.exception.has_error_code) { 3251 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; 3252 vmcs12->idt_vectoring_error_code = 3253 vcpu->arch.exception.error_code; 3254 } 3255 3256 vmcs12->idt_vectoring_info_field = idt_vectoring; 3257 } else if (vcpu->arch.nmi_injected) { 3258 vmcs12->idt_vectoring_info_field = 3259 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; 3260 } else if (vcpu->arch.interrupt.injected) { 3261 nr = vcpu->arch.interrupt.nr; 3262 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3263 3264 if (vcpu->arch.interrupt.soft) { 3265 idt_vectoring |= INTR_TYPE_SOFT_INTR; 3266 vmcs12->vm_entry_instruction_len = 3267 vcpu->arch.event_exit_inst_len; 3268 } else 3269 idt_vectoring |= INTR_TYPE_EXT_INTR; 3270 3271 vmcs12->idt_vectoring_info_field = idt_vectoring; 3272 } 3273 } 3274 3275 3276 static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) 3277 { 3278 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3279 gfn_t gfn; 3280 3281 /* 3282 * Don't need to mark the APIC access page dirty; it is never 3283 * written to by the CPU during APIC virtualization. 
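 * (This mirrors what delivery of a real NMI does: taking the exit
 * consumes the NMI and leaves further NMIs blocked until the guest's
 * handler unblocks them, hence nmi_pending is cleared and the NMI mask
 * is set below.)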
3284 */ 3285 3286 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3287 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; 3288 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3289 } 3290 3291 if (nested_cpu_has_posted_intr(vmcs12)) { 3292 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; 3293 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3294 } 3295 } 3296 3297 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 3298 { 3299 struct vcpu_vmx *vmx = to_vmx(vcpu); 3300 int max_irr; 3301 void *vapic_page; 3302 u16 status; 3303 3304 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) 3305 return; 3306 3307 vmx->nested.pi_pending = false; 3308 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 3309 return; 3310 3311 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); 3312 if (max_irr != 256) { 3313 vapic_page = kmap(vmx->nested.virtual_apic_page); 3314 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, 3315 vapic_page, &max_irr); 3316 kunmap(vmx->nested.virtual_apic_page); 3317 3318 status = vmcs_read16(GUEST_INTR_STATUS); 3319 if ((u8)max_irr > ((u8)status & 0xff)) { 3320 status &= ~0xff; 3321 status |= (u8)max_irr; 3322 vmcs_write16(GUEST_INTR_STATUS, status); 3323 } 3324 } 3325 3326 nested_mark_vmcs12_pages_dirty(vcpu); 3327 } 3328 3329 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, 3330 unsigned long exit_qual) 3331 { 3332 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3333 unsigned int nr = vcpu->arch.exception.nr; 3334 u32 intr_info = nr | INTR_INFO_VALID_MASK; 3335 3336 if (vcpu->arch.exception.has_error_code) { 3337 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; 3338 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 3339 } 3340 3341 if (kvm_exception_is_soft(nr)) 3342 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 3343 else 3344 intr_info |= INTR_TYPE_HARD_EXCEPTION; 3345 3346 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && 3347 vmx_get_nmi_mask(vcpu)) 3348 intr_info |= INTR_INFO_UNBLOCK_NMI; 3349 3350 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); 3351 } 3352 3353 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) 3354 { 3355 struct vcpu_vmx *vmx = to_vmx(vcpu); 3356 unsigned long exit_qual; 3357 bool block_nested_events = 3358 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); 3359 3360 if (vcpu->arch.exception.pending && 3361 nested_vmx_check_exception(vcpu, &exit_qual)) { 3362 if (block_nested_events) 3363 return -EBUSY; 3364 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3365 return 0; 3366 } 3367 3368 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && 3369 vmx->nested.preemption_timer_expired) { 3370 if (block_nested_events) 3371 return -EBUSY; 3372 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); 3373 return 0; 3374 } 3375 3376 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { 3377 if (block_nested_events) 3378 return -EBUSY; 3379 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 3380 NMI_VECTOR | INTR_TYPE_NMI_INTR | 3381 INTR_INFO_VALID_MASK, 0); 3382 /* 3383 * The NMI-triggered VM exit counts as injection: 3384 * clear this one and block further NMIs. 
3385 */ 3386 vcpu->arch.nmi_pending = 0; 3387 vmx_set_nmi_mask(vcpu, true); 3388 return 0; 3389 } 3390 3391 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && 3392 nested_exit_on_intr(vcpu)) { 3393 if (block_nested_events) 3394 return -EBUSY; 3395 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); 3396 return 0; 3397 } 3398 3399 vmx_complete_nested_posted_interrupt(vcpu); 3400 return 0; 3401 } 3402 3403 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 3404 { 3405 ktime_t remaining = 3406 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); 3407 u64 value; 3408 3409 if (ktime_to_ns(remaining) <= 0) 3410 return 0; 3411 3412 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; 3413 do_div(value, 1000000); 3414 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 3415 } 3416 3417 /* 3418 * Update the guest state fields of vmcs12 to reflect changes that 3419 * occurred while L2 was running. (The "IA-32e mode guest" bit of the 3420 * VM-entry controls is also updated, since this is really a guest 3421 * state bit.) 3422 */ 3423 static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3424 { 3425 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 3426 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); 3427 3428 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); 3429 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); 3430 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); 3431 3432 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); 3433 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); 3434 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); 3435 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); 3436 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); 3437 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); 3438 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); 3439 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); 3440 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); 3441 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); 3442 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); 3443 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); 3444 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); 3445 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); 3446 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); 3447 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); 3448 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); 3449 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); 3450 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); 3451 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); 3452 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); 3453 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); 3454 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); 3455 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); 3456 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); 3457 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); 3458 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); 3459 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); 3460 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); 3461 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); 3462 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); 3463 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); 3464 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); 3465 vmcs12->guest_tr_base = 
vmcs_readl(GUEST_TR_BASE); 3466 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); 3467 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); 3468 3469 vmcs12->guest_interruptibility_info = 3470 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 3471 vmcs12->guest_pending_dbg_exceptions = 3472 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); 3473 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 3474 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; 3475 else 3476 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; 3477 3478 if (nested_cpu_has_preemption_timer(vmcs12) && 3479 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) 3480 vmcs12->vmx_preemption_timer_value = 3481 vmx_get_preemption_timer_value(vcpu); 3482 3483 /* 3484 * In some cases (usually, nested EPT), L2 is allowed to change its 3485 * own CR3 without exiting. If it has changed it, we must keep it. 3486 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined 3487 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. 3488 * 3489 * Additionally, restore L2's PDPTR to vmcs12. 3490 */ 3491 if (enable_ept) { 3492 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); 3493 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); 3494 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); 3495 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); 3496 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); 3497 } 3498 3499 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); 3500 3501 if (nested_cpu_has_vid(vmcs12)) 3502 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); 3503 3504 vmcs12->vm_entry_controls = 3505 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | 3506 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); 3507 3508 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) { 3509 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); 3510 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 3511 } 3512 3513 /* TODO: These cannot have changed unless we have MSR bitmaps and 3514 * the relevant bit asks not to trap the change */ 3515 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) 3516 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); 3517 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) 3518 vmcs12->guest_ia32_efer = vcpu->arch.efer; 3519 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); 3520 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); 3521 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); 3522 if (kvm_mpx_supported()) 3523 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3524 } 3525 3526 /* 3527 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits 3528 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), 3529 * and this function updates it to reflect the changes to the guest state while 3530 * L2 was running (and perhaps made some exits which were handled directly by L0 3531 * without going back to L1), and to reflect the exit reason. 3532 * Note that we do not have to copy here all VMCS fields, just those that 3533 * could have changed by the L2 guest or the exit - i.e., the guest-state and 3534 * exit-information fields only. Other fields are modified by L1 with VMWRITE, 3535 * which already writes to vmcs12 directly. 
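 * (Host-state fields of vmcs12, for example, are never written here; they
 * are only consumed later by load_vmcs12_host_state().)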
3536 */ 3537 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 3538 u32 exit_reason, u32 exit_intr_info, 3539 unsigned long exit_qualification) 3540 { 3541 /* update guest state fields: */ 3542 sync_vmcs12(vcpu, vmcs12); 3543 3544 /* update exit information fields: */ 3545 3546 vmcs12->vm_exit_reason = exit_reason; 3547 vmcs12->exit_qualification = exit_qualification; 3548 vmcs12->vm_exit_intr_info = exit_intr_info; 3549 3550 vmcs12->idt_vectoring_info_field = 0; 3551 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 3552 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 3553 3554 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { 3555 vmcs12->launch_state = 1; 3556 3557 /* vm_entry_intr_info_field is cleared on exit. Emulate this 3558 * instead of reading the real value. */ 3559 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; 3560 3561 /* 3562 * Transfer the event that L0 or L1 may have wanted to inject into 3563 * L2 to IDT_VECTORING_INFO_FIELD. 3564 */ 3565 vmcs12_save_pending_event(vcpu, vmcs12); 3566 3567 /* 3568 * According to the spec, there's no need to store the guest's 3569 * MSRs if the exit is due to a VM-entry failure that occurs 3570 * during or after loading the guest state. Since this exit 3571 * does not fall in that category, we need to save the MSRs. 3572 */ 3573 if (nested_vmx_store_msr(vcpu, 3574 vmcs12->vm_exit_msr_store_addr, 3575 vmcs12->vm_exit_msr_store_count)) 3576 nested_vmx_abort(vcpu, 3577 VMX_ABORT_SAVE_GUEST_MSR_FAIL); 3578 } 3579 3580 /* 3581 * Drop what we picked up for L2 via vmx_complete_interrupts. It is 3582 * preserved above and would only end up incorrectly in L1. 3583 */ 3584 vcpu->arch.nmi_injected = false; 3585 kvm_clear_exception_queue(vcpu); 3586 kvm_clear_interrupt_queue(vcpu); 3587 } 3588 3589 /* 3590 * Part of what we need to do when the nested L2 guest exits and we want to 3591 * run its L1 parent is to reset L1's guest state to the host state specified 3592 * in vmcs12. 3593 * This function is to be called not only on normal nested exit, but also on 3594 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry 3595 * Failures During or After Loading Guest State"). 3596 * This function should be called when the active VMCS is L1's (vmcs01). 3597 */ 3598 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 3599 struct vmcs12 *vmcs12) 3600 { 3601 struct kvm_segment seg; 3602 u32 entry_failure_code; 3603 3604 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) 3605 vcpu->arch.efer = vmcs12->host_ia32_efer; 3606 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 3607 vcpu->arch.efer |= (EFER_LMA | EFER_LME); 3608 else 3609 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); 3610 vmx_set_efer(vcpu, vcpu->arch.efer); 3611 3612 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); 3613 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); 3614 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); 3615 vmx_set_interrupt_shadow(vcpu, 0); 3616 3617 /* 3618 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't 3619 * actually changed, because vmx_set_cr0 refers to the EFER value set above. 3620 * 3621 * CR0_GUEST_HOST_MASK is already set in the original vmcs01 3622 * (KVM doesn't change it). 3623 */ 3624 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; 3625 vmx_set_cr0(vcpu, vmcs12->host_cr0); 3626 3627 /* Same as above - no reason to call set_cr4_guest_host_mask(). 
*/ 3628 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 3629 vmx_set_cr4(vcpu, vmcs12->host_cr4); 3630 3631 nested_ept_uninit_mmu_context(vcpu); 3632 3633 /* 3634 * Only PDPTE load can fail as the value of cr3 was checked on entry and 3635 * couldn't have changed. 3636 */ 3637 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) 3638 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); 3639 3640 if (!enable_ept) 3641 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; 3642 3643 /* 3644 * If vmcs01 doesn't use VPID, CPU flushes TLB on every 3645 * VMEntry/VMExit. Thus, no need to flush TLB. 3646 * 3647 * If vmcs12 doesn't use VPID, L1 expects TLB to be 3648 * flushed on every VMEntry/VMExit. 3649 * 3650 * Otherwise, we can preserve TLB entries as long as we are 3651 * able to tag L1 TLB entries differently than L2 TLB entries. 3652 * 3653 * If vmcs12 uses EPT, we need to execute this flush on EPTP01 3654 * and therefore we request the TLB flush to happen only after VMCS EPTP 3655 * has been set by KVM_REQ_LOAD_CR3. 3656 */ 3657 if (enable_vpid && 3658 (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) { 3659 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 3660 } 3661 3662 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); 3663 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); 3664 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); 3665 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); 3666 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); 3667 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); 3668 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); 3669 3670 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ 3671 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) 3672 vmcs_write64(GUEST_BNDCFGS, 0); 3673 3674 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { 3675 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); 3676 vcpu->arch.pat = vmcs12->host_ia32_pat; 3677 } 3678 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 3679 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, 3680 vmcs12->host_ia32_perf_global_ctrl); 3681 3682 /* Set L1 segment info according to Intel SDM 3683 27.5.2 Loading Host Segment and Descriptor-Table Registers */ 3684 seg = (struct kvm_segment) { 3685 .base = 0, 3686 .limit = 0xFFFFFFFF, 3687 .selector = vmcs12->host_cs_selector, 3688 .type = 11, 3689 .present = 1, 3690 .s = 1, 3691 .g = 1 3692 }; 3693 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 3694 seg.l = 1; 3695 else 3696 seg.db = 1; 3697 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); 3698 seg = (struct kvm_segment) { 3699 .base = 0, 3700 .limit = 0xFFFFFFFF, 3701 .type = 3, 3702 .present = 1, 3703 .s = 1, 3704 .db = 1, 3705 .g = 1 3706 }; 3707 seg.selector = vmcs12->host_ds_selector; 3708 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); 3709 seg.selector = vmcs12->host_es_selector; 3710 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); 3711 seg.selector = vmcs12->host_ss_selector; 3712 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); 3713 seg.selector = vmcs12->host_fs_selector; 3714 seg.base = vmcs12->host_fs_base; 3715 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); 3716 seg.selector = vmcs12->host_gs_selector; 3717 seg.base = vmcs12->host_gs_base; 3718 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); 3719 seg = (struct kvm_segment) { 3720 .base = vmcs12->host_tr_base, 3721 .limit = 0x67, 3722 .selector = vmcs12->host_tr_selector, 3723 .type = 11, 3724 .present = 1 3725 }; 3726 
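	/* TR is loaded as a busy 32-bit TSS (type 11) with the architectural 0x67 limit, per SDM 27.5.2. */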
vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); 3727 3728 kvm_set_dr(vcpu, 7, 0x400); 3729 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 3730 3731 if (cpu_has_vmx_msr_bitmap()) 3732 vmx_update_msr_bitmap(vcpu); 3733 3734 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 3735 vmcs12->vm_exit_msr_load_count)) 3736 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 3737 } 3738 3739 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) 3740 { 3741 struct shared_msr_entry *efer_msr; 3742 unsigned int i; 3743 3744 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) 3745 return vmcs_read64(GUEST_IA32_EFER); 3746 3747 if (cpu_has_load_ia32_efer()) 3748 return host_efer; 3749 3750 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { 3751 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) 3752 return vmx->msr_autoload.guest.val[i].value; 3753 } 3754 3755 efer_msr = find_msr_entry(vmx, MSR_EFER); 3756 if (efer_msr) 3757 return efer_msr->data; 3758 3759 return host_efer; 3760 } 3761 3762 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) 3763 { 3764 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3765 struct vcpu_vmx *vmx = to_vmx(vcpu); 3766 struct vmx_msr_entry g, h; 3767 struct msr_data msr; 3768 gpa_t gpa; 3769 u32 i, j; 3770 3771 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); 3772 3773 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 3774 /* 3775 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set 3776 * as vmcs01.GUEST_DR7 contains a userspace defined value 3777 * and vcpu->arch.dr7 is not squirreled away before the 3778 * nested VMENTER (not worth adding a variable in nested_vmx). 3779 */ 3780 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 3781 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 3782 else 3783 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); 3784 } 3785 3786 /* 3787 * Note that calling vmx_set_{efer,cr0,cr4} is important as they 3788 * handle a variety of side effects to KVM's software model. 3789 */ 3790 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); 3791 3792 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; 3793 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); 3794 3795 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 3796 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); 3797 3798 nested_ept_uninit_mmu_context(vcpu); 3799 3800 /* 3801 * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3 3802 * points to shadow pages! Fortunately we only get here after a WARN_ON 3803 * if EPT is disabled, so a VMabort is perfectly fine. 3804 */ 3805 if (enable_ept) { 3806 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 3807 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); 3808 } else { 3809 nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED); 3810 } 3811 3812 /* 3813 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs 3814 * from vmcs01 (if necessary). The PDPTRs are not loaded on 3815 * VMFail, like everything else we just need to ensure our 3816 * software model is up-to-date. 3817 */ 3818 ept_save_pdptrs(vcpu); 3819 3820 kvm_mmu_reset_context(vcpu); 3821 3822 if (cpu_has_vmx_msr_bitmap()) 3823 vmx_update_msr_bitmap(vcpu); 3824 3825 /* 3826 * This nasty bit of open coding is a compromise between blindly 3827 * loading L1's MSRs using the exit load lists (incorrect emulation 3828 * of VMFail), leaving the nested VM's MSRs in the software model 3829 * (incorrect behavior) and snapshotting the modified MSRs (too 3830 * expensive since the lists are unbound by hardware). 
For each 3831 * MSR that was (prematurely) loaded from the nested VMEntry load 3832 * list, reload it from the exit load list if it exists and differs 3833 * from the guest value. The intent is to stuff host state as 3834 * silently as possible, not to fully process the exit load list. 3835 */ 3836 msr.host_initiated = false; 3837 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { 3838 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); 3839 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { 3840 pr_debug_ratelimited( 3841 "%s read MSR index failed (%u, 0x%08llx)\n", 3842 __func__, i, gpa); 3843 goto vmabort; 3844 } 3845 3846 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { 3847 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); 3848 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { 3849 pr_debug_ratelimited( 3850 "%s read MSR failed (%u, 0x%08llx)\n", 3851 __func__, j, gpa); 3852 goto vmabort; 3853 } 3854 if (h.index != g.index) 3855 continue; 3856 if (h.value == g.value) 3857 break; 3858 3859 if (nested_vmx_load_msr_check(vcpu, &h)) { 3860 pr_debug_ratelimited( 3861 "%s check failed (%u, 0x%x, 0x%x)\n", 3862 __func__, j, h.index, h.reserved); 3863 goto vmabort; 3864 } 3865 3866 msr.index = h.index; 3867 msr.data = h.value; 3868 if (kvm_set_msr(vcpu, &msr)) { 3869 pr_debug_ratelimited( 3870 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", 3871 __func__, j, h.index, h.value); 3872 goto vmabort; 3873 } 3874 } 3875 } 3876 3877 return; 3878 3879 vmabort: 3880 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 3881 } 3882 3883 /* 3884 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 3885 * and modify vmcs12 to make it see what it would expect to see there if 3886 * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) 3887 */ 3888 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, 3889 u32 exit_intr_info, unsigned long exit_qualification) 3890 { 3891 struct vcpu_vmx *vmx = to_vmx(vcpu); 3892 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3893 3894 /* trying to cancel vmlaunch/vmresume is a bug */ 3895 WARN_ON_ONCE(vmx->nested.nested_run_pending); 3896 3897 leave_guest_mode(vcpu); 3898 3899 if (nested_cpu_has_preemption_timer(vmcs12)) 3900 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); 3901 3902 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 3903 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3904 3905 if (likely(!vmx->fail)) { 3906 if (exit_reason == -1) 3907 sync_vmcs12(vcpu, vmcs12); 3908 else 3909 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, 3910 exit_qualification); 3911 3912 /* 3913 * Must happen outside of sync_vmcs12() as it will 3914 * also be used to capture vmcs12 cache as part of 3915 * capturing nVMX state for snapshot (migration). 3916 * 3917 * Otherwise, this flush will dirty guest memory at a 3918 * point it is already assumed by user-space to be 3919 * immutable. 3920 */ 3921 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); 3922 } else { 3923 /* 3924 * The only expected VM-instruction error is "VM entry with 3925 * invalid control field(s)." Anything else indicates a 3926 * problem with L0. And we should never get here with a 3927 * VMFail of any type if early consistency checks are enabled. 
3928 */ 3929 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 3930 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3931 WARN_ON_ONCE(nested_early_check); 3932 } 3933 3934 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3935 3936 /* Update any VMCS fields that might have changed while L2 ran */ 3937 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 3938 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 3939 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 3940 3941 if (kvm_has_tsc_control) 3942 decache_tsc_multiplier(vmx); 3943 3944 if (vmx->nested.change_vmcs01_virtual_apic_mode) { 3945 vmx->nested.change_vmcs01_virtual_apic_mode = false; 3946 vmx_set_virtual_apic_mode(vcpu); 3947 } else if (!nested_cpu_has_ept(vmcs12) && 3948 nested_cpu_has2(vmcs12, 3949 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3950 vmx_flush_tlb(vcpu, true); 3951 } 3952 3953 /* Unpin physical memory we referred to in vmcs02 */ 3954 if (vmx->nested.apic_access_page) { 3955 kvm_release_page_dirty(vmx->nested.apic_access_page); 3956 vmx->nested.apic_access_page = NULL; 3957 } 3958 if (vmx->nested.virtual_apic_page) { 3959 kvm_release_page_dirty(vmx->nested.virtual_apic_page); 3960 vmx->nested.virtual_apic_page = NULL; 3961 } 3962 if (vmx->nested.pi_desc_page) { 3963 kunmap(vmx->nested.pi_desc_page); 3964 kvm_release_page_dirty(vmx->nested.pi_desc_page); 3965 vmx->nested.pi_desc_page = NULL; 3966 vmx->nested.pi_desc = NULL; 3967 } 3968 3969 /* 3970 * We are now running in L2, mmu_notifier will force to reload the 3971 * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1. 3972 */ 3973 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 3974 3975 if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs)) 3976 vmx->nested.need_vmcs12_sync = true; 3977 3978 /* in case we halted in L2 */ 3979 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 3980 3981 if (likely(!vmx->fail)) { 3982 /* 3983 * TODO: SDM says that with acknowledge interrupt on 3984 * exit, bit 31 of the VM-exit interrupt information 3985 * (valid interrupt) is always set to 1 on 3986 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't 3987 * need kvm_cpu_has_interrupt(). See the commit 3988 * message for details. 3989 */ 3990 if (nested_exit_intr_ack_set(vcpu) && 3991 exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && 3992 kvm_cpu_has_interrupt(vcpu)) { 3993 int irq = kvm_cpu_get_interrupt(vcpu); 3994 WARN_ON(irq < 0); 3995 vmcs12->vm_exit_intr_info = irq | 3996 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; 3997 } 3998 3999 if (exit_reason != -1) 4000 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, 4001 vmcs12->exit_qualification, 4002 vmcs12->idt_vectoring_info_field, 4003 vmcs12->vm_exit_intr_info, 4004 vmcs12->vm_exit_intr_error_code, 4005 KVM_ISA_VMX); 4006 4007 load_vmcs12_host_state(vcpu, vmcs12); 4008 4009 return; 4010 } 4011 4012 /* 4013 * After an early L2 VM-entry failure, we're now back 4014 * in L1 which thinks it just finished a VMLAUNCH or 4015 * VMRESUME instruction, so we need to set the failure 4016 * flag and the VM-instruction error field of the VMCS 4017 * accordingly, and skip the emulated instruction. 4018 */ 4019 (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4020 4021 /* 4022 * Restore L1's host state to KVM's software model. We're here 4023 * because a consistency check was caught by hardware, which 4024 * means some amount of guest state has been propagated to KVM's 4025 * model and needs to be unwound to the host's state. 
4026 */ 4027 nested_vmx_restore_host_state(vcpu); 4028 4029 vmx->fail = 0; 4030 } 4031 4032 /* 4033 * Decode the memory-address operand of a vmx instruction, as recorded on an 4034 * exit caused by such an instruction (run by a guest hypervisor). 4035 * On success, returns 0. When the operand is invalid, returns 1 and throws 4036 * #UD or #GP. 4037 */ 4038 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, 4039 u32 vmx_instruction_info, bool wr, gva_t *ret) 4040 { 4041 gva_t off; 4042 bool exn; 4043 struct kvm_segment s; 4044 4045 /* 4046 * According to Vol. 3B, "Information for VM Exits Due to Instruction 4047 * Execution", on an exit, vmx_instruction_info holds most of the 4048 * addressing components of the operand. Only the displacement part 4049 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). 4050 * For how an actual address is calculated from all these components, 4051 * refer to Vol. 1, "Operand Addressing". 4052 */ 4053 int scaling = vmx_instruction_info & 3; 4054 int addr_size = (vmx_instruction_info >> 7) & 7; 4055 bool is_reg = vmx_instruction_info & (1u << 10); 4056 int seg_reg = (vmx_instruction_info >> 15) & 7; 4057 int index_reg = (vmx_instruction_info >> 18) & 0xf; 4058 bool index_is_valid = !(vmx_instruction_info & (1u << 22)); 4059 int base_reg = (vmx_instruction_info >> 23) & 0xf; 4060 bool base_is_valid = !(vmx_instruction_info & (1u << 27)); 4061 4062 if (is_reg) { 4063 kvm_queue_exception(vcpu, UD_VECTOR); 4064 return 1; 4065 } 4066 4067 /* Addr = segment_base + offset */ 4068 /* offset = base + [index * scale] + displacement */ 4069 off = exit_qualification; /* holds the displacement */ 4070 if (addr_size == 1) 4071 off = (gva_t)sign_extend64(off, 31); 4072 else if (addr_size == 0) 4073 off = (gva_t)sign_extend64(off, 15); 4074 if (base_is_valid) 4075 off += kvm_register_read(vcpu, base_reg); 4076 if (index_is_valid) 4077 off += kvm_register_read(vcpu, index_reg)<<scaling; 4078 vmx_get_segment(vcpu, &s, seg_reg); 4079 4080 /* 4081 * The effective address, i.e. @off, of a memory operand is truncated 4082 * based on the address size of the instruction. Note that this is 4083 * the *effective address*, i.e. the address prior to accounting for 4084 * the segment's base. 4085 */ 4086 if (addr_size == 1) /* 32 bit */ 4087 off &= 0xffffffff; 4088 else if (addr_size == 0) /* 16 bit */ 4089 off &= 0xffff; 4090 4091 /* Checks for #GP/#SS exceptions. */ 4092 exn = false; 4093 if (is_long_mode(vcpu)) { 4094 /* 4095 * The virtual/linear address is never truncated in 64-bit 4096 * mode, e.g. a 32-bit address size can yield a 64-bit virtual 4097 * address when using FS/GS with a non-zero base. 4098 */ 4099 *ret = s.base + off; 4100 4101 /* Long mode: #GP(0)/#SS(0) if the memory address is in a 4102 * non-canonical form. This is the only check on the memory 4103 * destination for long mode! 4104 */ 4105 exn = is_noncanonical_address(*ret, vcpu); 4106 } else { 4107 /* 4108 * When not in long mode, the virtual/linear address is 4109 * unconditionally truncated to 32 bits regardless of the 4110 * address size. 4111 */ 4112 *ret = (s.base + off) & 0xffffffff; 4113 4114 /* Protected mode: apply checks for segment validity in the 4115 * following order: 4116 * - segment type check (#GP(0) may be thrown) 4117 * - usability check (#GP(0)/#SS(0)) 4118 * - limit check (#GP(0)/#SS(0)) 4119 */ 4120 if (wr) 4121 /* #GP(0) if the destination operand is located in a 4122 * read-only data segment or any code segment. 
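 * (In the segment AR type field, bit 3 set means a code segment and, for
 * data segments, bit 1 is the writable bit; hence the
 * (s.type & 0xa) == 0 and (s.type & 8) checks below.)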
4123 */ 4124 exn = ((s.type & 0xa) == 0 || (s.type & 8)); 4125 else 4126 /* #GP(0) if the source operand is located in an 4127 * execute-only code segment 4128 */ 4129 exn = ((s.type & 0xa) == 8); 4130 if (exn) { 4131 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 4132 return 1; 4133 } 4134 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 4135 */ 4136 exn = (s.unusable != 0); 4137 4138 /* 4139 * Protected mode: #GP(0)/#SS(0) if the memory operand is 4140 * outside the segment limit. All CPUs that support VMX ignore 4141 * limit checks for flat segments, i.e. segments with base==0, 4142 * limit==0xffffffff and of type expand-up data or code. 4143 */ 4144 if (!(s.base == 0 && s.limit == 0xffffffff && 4145 ((s.type & 8) || !(s.type & 4)))) 4146 exn = exn || (off + sizeof(u64) > s.limit); 4147 } 4148 if (exn) { 4149 kvm_queue_exception_e(vcpu, 4150 seg_reg == VCPU_SREG_SS ? 4151 SS_VECTOR : GP_VECTOR, 4152 0); 4153 return 1; 4154 } 4155 4156 return 0; 4157 } 4158 4159 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) 4160 { 4161 gva_t gva; 4162 struct x86_exception e; 4163 4164 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 4165 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) 4166 return 1; 4167 4168 if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) { 4169 kvm_inject_page_fault(vcpu, &e); 4170 return 1; 4171 } 4172 4173 return 0; 4174 } 4175 4176 /* 4177 * Allocate a shadow VMCS and associate it with the currently loaded 4178 * VMCS, unless such a shadow VMCS already exists. The newly allocated 4179 * VMCS is also VMCLEARed, so that it is ready for use. 4180 */ 4181 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) 4182 { 4183 struct vcpu_vmx *vmx = to_vmx(vcpu); 4184 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; 4185 4186 /* 4187 * We should allocate a shadow vmcs for vmcs01 only when L1 4188 * executes VMXON and free it when L1 executes VMXOFF. 4189 * As it is invalid to execute VMXON twice, we shouldn't reach 4190 * here when vmcs01 already have an allocated shadow vmcs. 
4191 */ 4192 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); 4193 4194 if (!loaded_vmcs->shadow_vmcs) { 4195 loaded_vmcs->shadow_vmcs = alloc_vmcs(true); 4196 if (loaded_vmcs->shadow_vmcs) 4197 vmcs_clear(loaded_vmcs->shadow_vmcs); 4198 } 4199 return loaded_vmcs->shadow_vmcs; 4200 } 4201 4202 static int enter_vmx_operation(struct kvm_vcpu *vcpu) 4203 { 4204 struct vcpu_vmx *vmx = to_vmx(vcpu); 4205 int r; 4206 4207 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); 4208 if (r < 0) 4209 goto out_vmcs02; 4210 4211 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4212 if (!vmx->nested.cached_vmcs12) 4213 goto out_cached_vmcs12; 4214 4215 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4216 if (!vmx->nested.cached_shadow_vmcs12) 4217 goto out_cached_shadow_vmcs12; 4218 4219 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) 4220 goto out_shadow_vmcs; 4221 4222 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, 4223 HRTIMER_MODE_REL_PINNED); 4224 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 4225 4226 vmx->nested.vpid02 = allocate_vpid(); 4227 4228 vmx->nested.vmcs02_initialized = false; 4229 vmx->nested.vmxon = true; 4230 4231 if (pt_mode == PT_MODE_HOST_GUEST) { 4232 vmx->pt_desc.guest.ctl = 0; 4233 pt_update_intercept_for_msr(vmx); 4234 } 4235 4236 return 0; 4237 4238 out_shadow_vmcs: 4239 kfree(vmx->nested.cached_shadow_vmcs12); 4240 4241 out_cached_shadow_vmcs12: 4242 kfree(vmx->nested.cached_vmcs12); 4243 4244 out_cached_vmcs12: 4245 free_loaded_vmcs(&vmx->nested.vmcs02); 4246 4247 out_vmcs02: 4248 return -ENOMEM; 4249 } 4250 4251 /* 4252 * Emulate the VMXON instruction. 4253 * Currently, we just remember that VMX is active, and do not save or even 4254 * inspect the argument to VMXON (the so-called "VMXON pointer") because we 4255 * do not currently need to store anything in that guest-allocated memory 4256 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their 4257 * argument is different from the VMXON pointer (which the spec says they do). 4258 */ 4259 static int handle_vmon(struct kvm_vcpu *vcpu) 4260 { 4261 int ret; 4262 gpa_t vmptr; 4263 struct page *page; 4264 struct vcpu_vmx *vmx = to_vmx(vcpu); 4265 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED 4266 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; 4267 4268 /* 4269 * The Intel VMX Instruction Reference lists a bunch of bits that are 4270 * prerequisite to running VMXON, most notably cr4.VMXE must be set to 4271 * 1 (see vmx_set_cr4() for when we allow the guest to set this). 4272 * Otherwise, we should fail with #UD. But most faulting conditions 4273 * have already been checked by hardware, prior to the VM-exit for 4274 * VMXON. We do test guest cr4.VMXE because processor CR4 always has 4275 * that bit set to 1 in non-root mode. 4276 */ 4277 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { 4278 kvm_queue_exception(vcpu, UD_VECTOR); 4279 return 1; 4280 } 4281 4282 /* CPL=0 must be checked manually. 
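 * When VMXON is executed in VMX non-root operation the resulting VM exit
 * takes priority over the CPL check, so KVM has to apply that check here
 * on L1's behalf.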
*/ 4283 if (vmx_get_cpl(vcpu)) { 4284 kvm_inject_gp(vcpu, 0); 4285 return 1; 4286 } 4287 4288 if (vmx->nested.vmxon) 4289 return nested_vmx_failValid(vcpu, 4290 VMXERR_VMXON_IN_VMX_ROOT_OPERATION); 4291 4292 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) 4293 != VMXON_NEEDED_FEATURES) { 4294 kvm_inject_gp(vcpu, 0); 4295 return 1; 4296 } 4297 4298 if (nested_vmx_get_vmptr(vcpu, &vmptr)) 4299 return 1; 4300 4301 /* 4302 * SDM 3: 24.11.5 4303 * The first 4 bytes of VMXON region contain the supported 4304 * VMCS revision identifier 4305 * 4306 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; 4307 * which replaces physical address width with 32 4308 */ 4309 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) 4310 return nested_vmx_failInvalid(vcpu); 4311 4312 page = kvm_vcpu_gpa_to_page(vcpu, vmptr); 4313 if (is_error_page(page)) 4314 return nested_vmx_failInvalid(vcpu); 4315 4316 if (*(u32 *)kmap(page) != VMCS12_REVISION) { 4317 kunmap(page); 4318 kvm_release_page_clean(page); 4319 return nested_vmx_failInvalid(vcpu); 4320 } 4321 kunmap(page); 4322 kvm_release_page_clean(page); 4323 4324 vmx->nested.vmxon_ptr = vmptr; 4325 ret = enter_vmx_operation(vcpu); 4326 if (ret) 4327 return ret; 4328 4329 return nested_vmx_succeed(vcpu); 4330 } 4331 4332 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) 4333 { 4334 struct vcpu_vmx *vmx = to_vmx(vcpu); 4335 4336 if (vmx->nested.current_vmptr == -1ull) 4337 return; 4338 4339 if (enable_shadow_vmcs) { 4340 /* copy to memory all shadowed fields in case 4341 they were modified */ 4342 copy_shadow_to_vmcs12(vmx); 4343 vmx->nested.need_vmcs12_sync = false; 4344 vmx_disable_shadow_vmcs(vmx); 4345 } 4346 vmx->nested.posted_intr_nv = -1; 4347 4348 /* Flush VMCS12 to guest memory */ 4349 kvm_vcpu_write_guest_page(vcpu, 4350 vmx->nested.current_vmptr >> PAGE_SHIFT, 4351 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); 4352 4353 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 4354 4355 vmx->nested.current_vmptr = -1ull; 4356 } 4357 4358 /* Emulate the VMXOFF instruction */ 4359 static int handle_vmoff(struct kvm_vcpu *vcpu) 4360 { 4361 if (!nested_vmx_check_permission(vcpu)) 4362 return 1; 4363 free_nested(vcpu); 4364 return nested_vmx_succeed(vcpu); 4365 } 4366 4367 /* Emulate the VMCLEAR instruction */ 4368 static int handle_vmclear(struct kvm_vcpu *vcpu) 4369 { 4370 struct vcpu_vmx *vmx = to_vmx(vcpu); 4371 u32 zero = 0; 4372 gpa_t vmptr; 4373 4374 if (!nested_vmx_check_permission(vcpu)) 4375 return 1; 4376 4377 if (nested_vmx_get_vmptr(vcpu, &vmptr)) 4378 return 1; 4379 4380 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) 4381 return nested_vmx_failValid(vcpu, 4382 VMXERR_VMCLEAR_INVALID_ADDRESS); 4383 4384 if (vmptr == vmx->nested.vmxon_ptr) 4385 return nested_vmx_failValid(vcpu, 4386 VMXERR_VMCLEAR_VMXON_POINTER); 4387 4388 if (vmx->nested.hv_evmcs_page) { 4389 if (vmptr == vmx->nested.hv_evmcs_vmptr) 4390 nested_release_evmcs(vcpu); 4391 } else { 4392 if (vmptr == vmx->nested.current_vmptr) 4393 nested_release_vmcs12(vcpu); 4394 4395 kvm_vcpu_write_guest(vcpu, 4396 vmptr + offsetof(struct vmcs12, 4397 launch_state), 4398 &zero, sizeof(zero)); 4399 } 4400 4401 return nested_vmx_succeed(vcpu); 4402 } 4403 4404 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); 4405 4406 /* Emulate the VMLAUNCH instruction */ 4407 static int handle_vmlaunch(struct kvm_vcpu *vcpu) 4408 { 4409 return nested_vmx_run(vcpu, true); 4410 } 4411 4412 /* Emulate the VMRESUME instruction */ 
4413 static int handle_vmresume(struct kvm_vcpu *vcpu) 4414 { 4415 4416 return nested_vmx_run(vcpu, false); 4417 } 4418 4419 static int handle_vmread(struct kvm_vcpu *vcpu) 4420 { 4421 unsigned long field; 4422 u64 field_value; 4423 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4424 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4425 gva_t gva = 0; 4426 struct vmcs12 *vmcs12; 4427 4428 if (!nested_vmx_check_permission(vcpu)) 4429 return 1; 4430 4431 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) 4432 return nested_vmx_failInvalid(vcpu); 4433 4434 if (!is_guest_mode(vcpu)) 4435 vmcs12 = get_vmcs12(vcpu); 4436 else { 4437 /* 4438 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD 4439 * to shadowed-field sets the ALU flags for VMfailInvalid. 4440 */ 4441 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) 4442 return nested_vmx_failInvalid(vcpu); 4443 vmcs12 = get_shadow_vmcs12(vcpu); 4444 } 4445 4446 /* Decode instruction info and find the field to read */ 4447 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 4448 /* Read the field, zero-extended to a u64 field_value */ 4449 if (vmcs12_read_any(vmcs12, field, &field_value) < 0) 4450 return nested_vmx_failValid(vcpu, 4451 VMXERR_UNSUPPORTED_VMCS_COMPONENT); 4452 4453 /* 4454 * Now copy part of this value to register or memory, as requested. 4455 * Note that the number of bits actually copied is 32 or 64 depending 4456 * on the guest's mode (32 or 64 bit), not on the given field's length. 4457 */ 4458 if (vmx_instruction_info & (1u << 10)) { 4459 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), 4460 field_value); 4461 } else { 4462 if (get_vmx_mem_address(vcpu, exit_qualification, 4463 vmx_instruction_info, true, &gva)) 4464 return 1; 4465 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ 4466 kvm_write_guest_virt_system(vcpu, gva, &field_value, 4467 (is_long_mode(vcpu) ? 8 : 4), NULL); 4468 } 4469 4470 return nested_vmx_succeed(vcpu); 4471 } 4472 4473 4474 static int handle_vmwrite(struct kvm_vcpu *vcpu) 4475 { 4476 unsigned long field; 4477 gva_t gva; 4478 struct vcpu_vmx *vmx = to_vmx(vcpu); 4479 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4480 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4481 4482 /* The value to write might be 32 or 64 bits, depending on L1's long 4483 * mode, and eventually we need to write that into a field of several 4484 * possible lengths. The code below first zero-extends the value to 64 4485 * bit (field_value), and then copies only the appropriate number of 4486 * bits into the vmcs12 field. 4487 */ 4488 u64 field_value = 0; 4489 struct x86_exception e; 4490 struct vmcs12 *vmcs12; 4491 4492 if (!nested_vmx_check_permission(vcpu)) 4493 return 1; 4494 4495 if (vmx->nested.current_vmptr == -1ull) 4496 return nested_vmx_failInvalid(vcpu); 4497 4498 if (vmx_instruction_info & (1u << 10)) 4499 field_value = kvm_register_readl(vcpu, 4500 (((vmx_instruction_info) >> 3) & 0xf)); 4501 else { 4502 if (get_vmx_mem_address(vcpu, exit_qualification, 4503 vmx_instruction_info, false, &gva)) 4504 return 1; 4505 if (kvm_read_guest_virt(vcpu, gva, &field_value, 4506 (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { 4507 kvm_inject_page_fault(vcpu, &e); 4508 return 1; 4509 } 4510 } 4511 4512 4513 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 4514 /* 4515 * If the vCPU supports "VMWRITE to any supported field in the 4516 * VMCS," then the "read-only" fields are actually read/write. 
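 * (That capability is advertised to L1 via the IA32_VMX_MISC MSR; see
 * nested_cpu_has_vmwrite_any_field() below.)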
4517 */ 4518 if (vmcs_field_readonly(field) && 4519 !nested_cpu_has_vmwrite_any_field(vcpu)) 4520 return nested_vmx_failValid(vcpu, 4521 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); 4522 4523 if (!is_guest_mode(vcpu)) 4524 vmcs12 = get_vmcs12(vcpu); 4525 else { 4526 /* 4527 * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE 4528 * to shadowed-field sets the ALU flags for VMfailInvalid. 4529 */ 4530 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) 4531 return nested_vmx_failInvalid(vcpu); 4532 vmcs12 = get_shadow_vmcs12(vcpu); 4533 } 4534 4535 if (vmcs12_write_any(vmcs12, field, field_value) < 0) 4536 return nested_vmx_failValid(vcpu, 4537 VMXERR_UNSUPPORTED_VMCS_COMPONENT); 4538 4539 /* 4540 * Do not track vmcs12 dirty-state if in guest-mode 4541 * as we actually dirty shadow vmcs12 instead of vmcs12. 4542 */ 4543 if (!is_guest_mode(vcpu)) { 4544 switch (field) { 4545 #define SHADOW_FIELD_RW(x) case x: 4546 #include "vmcs_shadow_fields.h" 4547 /* 4548 * The fields that can be updated by L1 without a vmexit are 4549 * always updated in the vmcs02, the others go down the slow 4550 * path of prepare_vmcs02. 4551 */ 4552 break; 4553 default: 4554 vmx->nested.dirty_vmcs12 = true; 4555 break; 4556 } 4557 } 4558 4559 return nested_vmx_succeed(vcpu); 4560 } 4561 4562 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) 4563 { 4564 vmx->nested.current_vmptr = vmptr; 4565 if (enable_shadow_vmcs) { 4566 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, 4567 SECONDARY_EXEC_SHADOW_VMCS); 4568 vmcs_write64(VMCS_LINK_POINTER, 4569 __pa(vmx->vmcs01.shadow_vmcs)); 4570 vmx->nested.need_vmcs12_sync = true; 4571 } 4572 vmx->nested.dirty_vmcs12 = true; 4573 } 4574 4575 /* Emulate the VMPTRLD instruction */ 4576 static int handle_vmptrld(struct kvm_vcpu *vcpu) 4577 { 4578 struct vcpu_vmx *vmx = to_vmx(vcpu); 4579 gpa_t vmptr; 4580 4581 if (!nested_vmx_check_permission(vcpu)) 4582 return 1; 4583 4584 if (nested_vmx_get_vmptr(vcpu, &vmptr)) 4585 return 1; 4586 4587 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) 4588 return nested_vmx_failValid(vcpu, 4589 VMXERR_VMPTRLD_INVALID_ADDRESS); 4590 4591 if (vmptr == vmx->nested.vmxon_ptr) 4592 return nested_vmx_failValid(vcpu, 4593 VMXERR_VMPTRLD_VMXON_POINTER); 4594 4595 /* Forbid normal VMPTRLD if Enlightened version was used */ 4596 if (vmx->nested.hv_evmcs) 4597 return 1; 4598 4599 if (vmx->nested.current_vmptr != vmptr) { 4600 struct vmcs12 *new_vmcs12; 4601 struct page *page; 4602 4603 page = kvm_vcpu_gpa_to_page(vcpu, vmptr); 4604 if (is_error_page(page)) { 4605 /* 4606 * Reads from an unbacked page return all 1s, 4607 * which means that the 32 bits located at the 4608 * given physical address won't match the required 4609 * VMCS12_REVISION identifier. 4610 */ 4611 return nested_vmx_failValid(vcpu, 4612 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 4613 } 4614 new_vmcs12 = kmap(page); 4615 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || 4616 (new_vmcs12->hdr.shadow_vmcs && 4617 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { 4618 kunmap(page); 4619 kvm_release_page_clean(page); 4620 return nested_vmx_failValid(vcpu, 4621 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 4622 } 4623 4624 nested_release_vmcs12(vcpu); 4625 4626 /* 4627 * Load VMCS12 from guest memory since it is not already 4628 * cached. 
4629 */ 4630 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); 4631 kunmap(page); 4632 kvm_release_page_clean(page); 4633 4634 set_current_vmptr(vmx, vmptr); 4635 } 4636 4637 return nested_vmx_succeed(vcpu); 4638 } 4639 4640 /* Emulate the VMPTRST instruction */ 4641 static int handle_vmptrst(struct kvm_vcpu *vcpu) 4642 { 4643 unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION); 4644 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4645 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; 4646 struct x86_exception e; 4647 gva_t gva; 4648 4649 if (!nested_vmx_check_permission(vcpu)) 4650 return 1; 4651 4652 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs)) 4653 return 1; 4654 4655 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva)) 4656 return 1; 4657 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ 4658 if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, 4659 sizeof(gpa_t), &e)) { 4660 kvm_inject_page_fault(vcpu, &e); 4661 return 1; 4662 } 4663 return nested_vmx_succeed(vcpu); 4664 } 4665 4666 /* Emulate the INVEPT instruction */ 4667 static int handle_invept(struct kvm_vcpu *vcpu) 4668 { 4669 struct vcpu_vmx *vmx = to_vmx(vcpu); 4670 u32 vmx_instruction_info, types; 4671 unsigned long type; 4672 gva_t gva; 4673 struct x86_exception e; 4674 struct { 4675 u64 eptp, gpa; 4676 } operand; 4677 4678 if (!(vmx->nested.msrs.secondary_ctls_high & 4679 SECONDARY_EXEC_ENABLE_EPT) || 4680 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { 4681 kvm_queue_exception(vcpu, UD_VECTOR); 4682 return 1; 4683 } 4684 4685 if (!nested_vmx_check_permission(vcpu)) 4686 return 1; 4687 4688 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4689 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 4690 4691 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 4692 4693 if (type >= 32 || !(types & (1 << type))) 4694 return nested_vmx_failValid(vcpu, 4695 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 4696 4697 /* According to the Intel VMX instruction reference, the memory 4698 * operand is read even if it isn't needed (e.g., for type==global) 4699 */ 4700 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 4701 vmx_instruction_info, false, &gva)) 4702 return 1; 4703 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { 4704 kvm_inject_page_fault(vcpu, &e); 4705 return 1; 4706 } 4707 4708 switch (type) { 4709 case VMX_EPT_EXTENT_GLOBAL: 4710 /* 4711 * TODO: track mappings and invalidate 4712 * single context requests appropriately 4713 */ 4714 case VMX_EPT_EXTENT_CONTEXT: 4715 kvm_mmu_sync_roots(vcpu); 4716 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 4717 break; 4718 default: 4719 BUG_ON(1); 4720 break; 4721 } 4722 4723 return nested_vmx_succeed(vcpu); 4724 } 4725 4726 static int handle_invvpid(struct kvm_vcpu *vcpu) 4727 { 4728 struct vcpu_vmx *vmx = to_vmx(vcpu); 4729 u32 vmx_instruction_info; 4730 unsigned long type, types; 4731 gva_t gva; 4732 struct x86_exception e; 4733 struct { 4734 u64 vpid; 4735 u64 gla; 4736 } operand; 4737 u16 vpid02; 4738 4739 if (!(vmx->nested.msrs.secondary_ctls_high & 4740 SECONDARY_EXEC_ENABLE_VPID) || 4741 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { 4742 kvm_queue_exception(vcpu, UD_VECTOR); 4743 return 1; 4744 } 4745 4746 if (!nested_vmx_check_permission(vcpu)) 4747 return 1; 4748 4749 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4750 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 4751 4752 types = 
(vmx->nested.msrs.vpid_caps & 4753 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; 4754 4755 if (type >= 32 || !(types & (1 << type))) 4756 return nested_vmx_failValid(vcpu, 4757 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 4758 4759 /* according to the intel vmx instruction reference, the memory 4760 * operand is read even if it isn't needed (e.g., for type==global) 4761 */ 4762 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 4763 vmx_instruction_info, false, &gva)) 4764 return 1; 4765 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { 4766 kvm_inject_page_fault(vcpu, &e); 4767 return 1; 4768 } 4769 if (operand.vpid >> 16) 4770 return nested_vmx_failValid(vcpu, 4771 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 4772 4773 vpid02 = nested_get_vpid02(vcpu); 4774 switch (type) { 4775 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: 4776 if (!operand.vpid || 4777 is_noncanonical_address(operand.gla, vcpu)) 4778 return nested_vmx_failValid(vcpu, 4779 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 4780 if (cpu_has_vmx_invvpid_individual_addr()) { 4781 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, 4782 vpid02, operand.gla); 4783 } else 4784 __vmx_flush_tlb(vcpu, vpid02, false); 4785 break; 4786 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 4787 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: 4788 if (!operand.vpid) 4789 return nested_vmx_failValid(vcpu, 4790 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 4791 __vmx_flush_tlb(vcpu, vpid02, false); 4792 break; 4793 case VMX_VPID_EXTENT_ALL_CONTEXT: 4794 __vmx_flush_tlb(vcpu, vpid02, false); 4795 break; 4796 default: 4797 WARN_ON_ONCE(1); 4798 return kvm_skip_emulated_instruction(vcpu); 4799 } 4800 4801 return nested_vmx_succeed(vcpu); 4802 } 4803 4804 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, 4805 struct vmcs12 *vmcs12) 4806 { 4807 u32 index = vcpu->arch.regs[VCPU_REGS_RCX]; 4808 u64 address; 4809 bool accessed_dirty; 4810 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 4811 4812 if (!nested_cpu_has_eptp_switching(vmcs12) || 4813 !nested_cpu_has_ept(vmcs12)) 4814 return 1; 4815 4816 if (index >= VMFUNC_EPTP_ENTRIES) 4817 return 1; 4818 4819 4820 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, 4821 &address, index * 8, 8)) 4822 return 1; 4823 4824 accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT); 4825 4826 /* 4827 * If the (L2) guest does a vmfunc to the currently 4828 * active ept pointer, we don't have to do anything else 4829 */ 4830 if (vmcs12->ept_pointer != address) { 4831 if (!valid_ept_address(vcpu, address)) 4832 return 1; 4833 4834 kvm_mmu_unload(vcpu); 4835 mmu->ept_ad = accessed_dirty; 4836 mmu->mmu_role.base.ad_disabled = !accessed_dirty; 4837 vmcs12->ept_pointer = address; 4838 /* 4839 * TODO: Check what's the correct approach in case 4840 * mmu reload fails. Currently, we just let the next 4841 * reload potentially fail 4842 */ 4843 kvm_mmu_reload(vcpu); 4844 } 4845 4846 return 0; 4847 } 4848 4849 static int handle_vmfunc(struct kvm_vcpu *vcpu) 4850 { 4851 struct vcpu_vmx *vmx = to_vmx(vcpu); 4852 struct vmcs12 *vmcs12; 4853 u32 function = vcpu->arch.regs[VCPU_REGS_RAX]; 4854 4855 /* 4856 * VMFUNC is only supported for nested guests, but we always enable the 4857 * secondary control for simplicity; for non-nested mode, fake that we 4858 * didn't by injecting #UD. 
4859 */ 4860 if (!is_guest_mode(vcpu)) { 4861 kvm_queue_exception(vcpu, UD_VECTOR); 4862 return 1; 4863 } 4864 4865 vmcs12 = get_vmcs12(vcpu); 4866 if ((vmcs12->vm_function_control & (1 << function)) == 0) 4867 goto fail; 4868 4869 switch (function) { 4870 case 0: 4871 if (nested_vmx_eptp_switching(vcpu, vmcs12)) 4872 goto fail; 4873 break; 4874 default: 4875 goto fail; 4876 } 4877 return kvm_skip_emulated_instruction(vcpu); 4878 4879 fail: 4880 nested_vmx_vmexit(vcpu, vmx->exit_reason, 4881 vmcs_read32(VM_EXIT_INTR_INFO), 4882 vmcs_readl(EXIT_QUALIFICATION)); 4883 return 1; 4884 } 4885 4886 4887 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, 4888 struct vmcs12 *vmcs12) 4889 { 4890 unsigned long exit_qualification; 4891 gpa_t bitmap, last_bitmap; 4892 unsigned int port; 4893 int size; 4894 u8 b; 4895 4896 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 4897 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); 4898 4899 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4900 4901 port = exit_qualification >> 16; 4902 size = (exit_qualification & 7) + 1; 4903 4904 last_bitmap = (gpa_t)-1; 4905 b = -1; 4906 4907 while (size > 0) { 4908 if (port < 0x8000) 4909 bitmap = vmcs12->io_bitmap_a; 4910 else if (port < 0x10000) 4911 bitmap = vmcs12->io_bitmap_b; 4912 else 4913 return true; 4914 bitmap += (port & 0x7fff) / 8; 4915 4916 if (last_bitmap != bitmap) 4917 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) 4918 return true; 4919 if (b & (1 << (port & 7))) 4920 return true; 4921 4922 port++; 4923 size--; 4924 last_bitmap = bitmap; 4925 } 4926 4927 return false; 4928 } 4929 4930 /* 4931 * Return 1 if we should exit from L2 to L1 to handle an MSR access, 4932 * rather than handle it ourselves in L0. I.e., check whether L1 expressed 4933 * disinterest in the current event (a read or write of a specific MSR) by using an 4934 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 4935 */ 4936 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, 4937 struct vmcs12 *vmcs12, u32 exit_reason) 4938 { 4939 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; 4940 gpa_t bitmap; 4941 4942 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 4943 return true; 4944 4945 /* 4946 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, 4947 * for the four combinations of read/write and low/high MSR numbers. 4948 * First we need to figure out which of the four to use: 4949 */ 4950 bitmap = vmcs12->msr_bitmap; 4951 if (exit_reason == EXIT_REASON_MSR_WRITE) 4952 bitmap += 2048; 4953 if (msr_index >= 0xc0000000) { 4954 msr_index -= 0xc0000000; 4955 bitmap += 1024; 4956 } 4957 4958 /* Then read the msr_index'th bit from this bitmap: */ 4959 if (msr_index < 1024*8) { 4960 unsigned char b; 4961 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) 4962 return true; 4963 return 1 & (b >> (msr_index & 7)); 4964 } else 4965 return true; /* let L1 handle the wrong parameter */ 4966 } 4967 4968 /* 4969 * Return 1 if we should exit from L2 to L1 to handle a CR access exit, 4970 * rather than handle it ourselves in L0. I.e., check if L1 wanted to 4971 * intercept (via guest_host_mask etc.) the current event. 
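 * For CR0/CR4 writes, only bits that L1 owns (i.e. that are set in
 * cr0/cr4_guest_host_mask) and whose new value differs from L1's read
 * shadow force a reflected exit.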
4972 */ 4973 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, 4974 struct vmcs12 *vmcs12) 4975 { 4976 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4977 int cr = exit_qualification & 15; 4978 int reg; 4979 unsigned long val; 4980 4981 switch ((exit_qualification >> 4) & 3) { 4982 case 0: /* mov to cr */ 4983 reg = (exit_qualification >> 8) & 15; 4984 val = kvm_register_readl(vcpu, reg); 4985 switch (cr) { 4986 case 0: 4987 if (vmcs12->cr0_guest_host_mask & 4988 (val ^ vmcs12->cr0_read_shadow)) 4989 return true; 4990 break; 4991 case 3: 4992 if ((vmcs12->cr3_target_count >= 1 && 4993 vmcs12->cr3_target_value0 == val) || 4994 (vmcs12->cr3_target_count >= 2 && 4995 vmcs12->cr3_target_value1 == val) || 4996 (vmcs12->cr3_target_count >= 3 && 4997 vmcs12->cr3_target_value2 == val) || 4998 (vmcs12->cr3_target_count >= 4 && 4999 vmcs12->cr3_target_value3 == val)) 5000 return false; 5001 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) 5002 return true; 5003 break; 5004 case 4: 5005 if (vmcs12->cr4_guest_host_mask & 5006 (vmcs12->cr4_read_shadow ^ val)) 5007 return true; 5008 break; 5009 case 8: 5010 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) 5011 return true; 5012 break; 5013 } 5014 break; 5015 case 2: /* clts */ 5016 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && 5017 (vmcs12->cr0_read_shadow & X86_CR0_TS)) 5018 return true; 5019 break; 5020 case 1: /* mov from cr */ 5021 switch (cr) { 5022 case 3: 5023 if (vmcs12->cpu_based_vm_exec_control & 5024 CPU_BASED_CR3_STORE_EXITING) 5025 return true; 5026 break; 5027 case 8: 5028 if (vmcs12->cpu_based_vm_exec_control & 5029 CPU_BASED_CR8_STORE_EXITING) 5030 return true; 5031 break; 5032 } 5033 break; 5034 case 3: /* lmsw */ 5035 /* 5036 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 5037 * cr0. Other attempted changes are ignored, with no exit. 5038 */ 5039 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 5040 if (vmcs12->cr0_guest_host_mask & 0xe & 5041 (val ^ vmcs12->cr0_read_shadow)) 5042 return true; 5043 if ((vmcs12->cr0_guest_host_mask & 0x1) && 5044 !(vmcs12->cr0_read_shadow & 0x1) && 5045 (val & 0x1)) 5046 return true; 5047 break; 5048 } 5049 return false; 5050 } 5051 5052 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, 5053 struct vmcs12 *vmcs12, gpa_t bitmap) 5054 { 5055 u32 vmx_instruction_info; 5056 unsigned long field; 5057 u8 b; 5058 5059 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 5060 return true; 5061 5062 /* Decode instruction info and find the field to access */ 5063 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5064 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 5065 5066 /* Out-of-range fields always cause a VM exit from L2 to L1 */ 5067 if (field >> 15) 5068 return true; 5069 5070 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) 5071 return true; 5072 5073 return 1 & (b >> (field & 7)); 5074 } 5075 5076 /* 5077 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we 5078 * should handle it ourselves in L0 (and then continue L2). Only call this 5079 * when in is_guest_mode (L2). 
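 * For example, EPT violations are always handled in L0 (see the
 * EXIT_REASON_EPT_VIOLATION case below), while CPUID and the VMX
 * instructions are always reflected to L1.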
5080 */ 5081 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) 5082 { 5083 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 5084 struct vcpu_vmx *vmx = to_vmx(vcpu); 5085 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5086 5087 if (vmx->nested.nested_run_pending) 5088 return false; 5089 5090 if (unlikely(vmx->fail)) { 5091 pr_info_ratelimited("%s failed vm entry %x\n", __func__, 5092 vmcs_read32(VM_INSTRUCTION_ERROR)); 5093 return true; 5094 } 5095 5096 /* 5097 * The host physical addresses of some pages of guest memory 5098 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC 5099 * Page). The CPU may write to these pages via their host 5100 * physical address while L2 is running, bypassing any 5101 * address-translation-based dirty tracking (e.g. EPT write 5102 * protection). 5103 * 5104 * Mark them dirty on every exit from L2 to prevent them from 5105 * getting out of sync with dirty tracking. 5106 */ 5107 nested_mark_vmcs12_pages_dirty(vcpu); 5108 5109 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, 5110 vmcs_readl(EXIT_QUALIFICATION), 5111 vmx->idt_vectoring_info, 5112 intr_info, 5113 vmcs_read32(VM_EXIT_INTR_ERROR_CODE), 5114 KVM_ISA_VMX); 5115 5116 switch (exit_reason) { 5117 case EXIT_REASON_EXCEPTION_NMI: 5118 if (is_nmi(intr_info)) 5119 return false; 5120 else if (is_page_fault(intr_info)) 5121 return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; 5122 else if (is_debug(intr_info) && 5123 vcpu->guest_debug & 5124 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 5125 return false; 5126 else if (is_breakpoint(intr_info) && 5127 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 5128 return false; 5129 return vmcs12->exception_bitmap & 5130 (1u << (intr_info & INTR_INFO_VECTOR_MASK)); 5131 case EXIT_REASON_EXTERNAL_INTERRUPT: 5132 return false; 5133 case EXIT_REASON_TRIPLE_FAULT: 5134 return true; 5135 case EXIT_REASON_PENDING_INTERRUPT: 5136 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); 5137 case EXIT_REASON_NMI_WINDOW: 5138 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); 5139 case EXIT_REASON_TASK_SWITCH: 5140 return true; 5141 case EXIT_REASON_CPUID: 5142 return true; 5143 case EXIT_REASON_HLT: 5144 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); 5145 case EXIT_REASON_INVD: 5146 return true; 5147 case EXIT_REASON_INVLPG: 5148 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 5149 case EXIT_REASON_RDPMC: 5150 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); 5151 case EXIT_REASON_RDRAND: 5152 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); 5153 case EXIT_REASON_RDSEED: 5154 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); 5155 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: 5156 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); 5157 case EXIT_REASON_VMREAD: 5158 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5159 vmcs12->vmread_bitmap); 5160 case EXIT_REASON_VMWRITE: 5161 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5162 vmcs12->vmwrite_bitmap); 5163 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: 5164 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: 5165 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: 5166 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 5167 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: 5168 /* 5169 * VMX instructions trap unconditionally. This allows L1 to 5170 * emulate them for its L2 guest, i.e., allows 3-level nesting! 
5171 */ 5172 return true; 5173 case EXIT_REASON_CR_ACCESS: 5174 return nested_vmx_exit_handled_cr(vcpu, vmcs12); 5175 case EXIT_REASON_DR_ACCESS: 5176 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); 5177 case EXIT_REASON_IO_INSTRUCTION: 5178 return nested_vmx_exit_handled_io(vcpu, vmcs12); 5179 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: 5180 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); 5181 case EXIT_REASON_MSR_READ: 5182 case EXIT_REASON_MSR_WRITE: 5183 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); 5184 case EXIT_REASON_INVALID_STATE: 5185 return true; 5186 case EXIT_REASON_MWAIT_INSTRUCTION: 5187 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); 5188 case EXIT_REASON_MONITOR_TRAP_FLAG: 5189 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG); 5190 case EXIT_REASON_MONITOR_INSTRUCTION: 5191 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); 5192 case EXIT_REASON_PAUSE_INSTRUCTION: 5193 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || 5194 nested_cpu_has2(vmcs12, 5195 SECONDARY_EXEC_PAUSE_LOOP_EXITING); 5196 case EXIT_REASON_MCE_DURING_VMENTRY: 5197 return false; 5198 case EXIT_REASON_TPR_BELOW_THRESHOLD: 5199 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); 5200 case EXIT_REASON_APIC_ACCESS: 5201 case EXIT_REASON_APIC_WRITE: 5202 case EXIT_REASON_EOI_INDUCED: 5203 /* 5204 * The controls for "virtualize APIC accesses," "APIC- 5205 * register virtualization," and "virtual-interrupt 5206 * delivery" only come from vmcs12. 5207 */ 5208 return true; 5209 case EXIT_REASON_EPT_VIOLATION: 5210 /* 5211 * L0 always deals with the EPT violation. If nested EPT is 5212 * used, and the nested mmu code discovers that the address is 5213 * missing in the guest EPT table (EPT12), the EPT violation 5214 * will be injected with nested_ept_inject_page_fault() 5215 */ 5216 return false; 5217 case EXIT_REASON_EPT_MISCONFIG: 5218 /* 5219 * L2 never directly uses L1's EPT, but rather L0's own EPT 5220 * table (shadow on EPT) or a merged EPT table that L0 built 5221 * (EPT on EPT). So any problems with the structure of the 5222 * table are L0's fault. 5223 */ 5224 return false; 5225 case EXIT_REASON_INVPCID: 5226 return 5227 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && 5228 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 5229 case EXIT_REASON_WBINVD: 5230 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); 5231 case EXIT_REASON_XSETBV: 5232 return true; 5233 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: 5234 /* 5235 * This should never happen, since it is not possible to 5236 * set XSS to a non-zero value---neither in L1 nor in L2. 5237 * If it were, XSS would have to be checked against 5238 * the XSS exit bitmap in vmcs12. 5239 */ 5240 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); 5241 case EXIT_REASON_PREEMPTION_TIMER: 5242 return false; 5243 case EXIT_REASON_PML_FULL: 5244 /* We emulate PML support for L1. */ 5245 return false; 5246 case EXIT_REASON_VMFUNC: 5247 /* VM functions are emulated through L2->L0 vmexits.
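 * L0 emulates the VM function itself (see handle_vmfunc()); a VMFUNC
 * exit is only synthesized for L1 when the requested function is not
 * enabled in vmcs12 or the emulation fails.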
*/ 5248 return false; 5249 case EXIT_REASON_ENCLS: 5250 /* SGX is never exposed to L1 */ 5251 return false; 5252 default: 5253 return true; 5254 } 5255 } 5256 5257 5258 static int vmx_get_nested_state(struct kvm_vcpu *vcpu, 5259 struct kvm_nested_state __user *user_kvm_nested_state, 5260 u32 user_data_size) 5261 { 5262 struct vcpu_vmx *vmx; 5263 struct vmcs12 *vmcs12; 5264 struct kvm_nested_state kvm_state = { 5265 .flags = 0, 5266 .format = 0, 5267 .size = sizeof(kvm_state), 5268 .vmx.vmxon_pa = -1ull, 5269 .vmx.vmcs_pa = -1ull, 5270 }; 5271 5272 if (!vcpu) 5273 return kvm_state.size + 2 * VMCS12_SIZE; 5274 5275 vmx = to_vmx(vcpu); 5276 vmcs12 = get_vmcs12(vcpu); 5277 5278 if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled) 5279 kvm_state.flags |= KVM_STATE_NESTED_EVMCS; 5280 5281 if (nested_vmx_allowed(vcpu) && 5282 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { 5283 kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr; 5284 kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr; 5285 5286 if (vmx_has_valid_vmcs12(vcpu)) { 5287 kvm_state.size += VMCS12_SIZE; 5288 5289 if (is_guest_mode(vcpu) && 5290 nested_cpu_has_shadow_vmcs(vmcs12) && 5291 vmcs12->vmcs_link_pointer != -1ull) 5292 kvm_state.size += VMCS12_SIZE; 5293 } 5294 5295 if (vmx->nested.smm.vmxon) 5296 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; 5297 5298 if (vmx->nested.smm.guest_mode) 5299 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; 5300 5301 if (is_guest_mode(vcpu)) { 5302 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; 5303 5304 if (vmx->nested.nested_run_pending) 5305 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; 5306 } 5307 } 5308 5309 if (user_data_size < kvm_state.size) 5310 goto out; 5311 5312 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) 5313 return -EFAULT; 5314 5315 if (!vmx_has_valid_vmcs12(vcpu)) 5316 goto out; 5317 5318 /* 5319 * When running L2, the authoritative vmcs12 state is in the 5320 * vmcs02. When running L1, the authoritative vmcs12 state is 5321 * in the shadow or enlightened vmcs linked to vmcs01, unless 5322 * need_vmcs12_sync is set, in which case, the authoritative 5323 * vmcs12 state is in the vmcs12 already. 5324 */ 5325 if (is_guest_mode(vcpu)) { 5326 sync_vmcs12(vcpu, vmcs12); 5327 } else if (!vmx->nested.need_vmcs12_sync) { 5328 if (vmx->nested.hv_evmcs) 5329 copy_enlightened_to_vmcs12(vmx); 5330 else if (enable_shadow_vmcs) 5331 copy_shadow_to_vmcs12(vmx); 5332 } 5333 5334 /* 5335 * Copy over the full allocated size of vmcs12 rather than just the size 5336 * of the struct. 5337 */ 5338 if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE)) 5339 return -EFAULT; 5340 5341 if (nested_cpu_has_shadow_vmcs(vmcs12) && 5342 vmcs12->vmcs_link_pointer != -1ull) { 5343 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, 5344 get_shadow_vmcs12(vcpu), VMCS12_SIZE)) 5345 return -EFAULT; 5346 } 5347 5348 out: 5349 return kvm_state.size; 5350 } 5351 5352 /* 5353 * Forcibly leave nested mode in order to be able to reset the VCPU later on. 
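 * If the vCPU is in guest mode this first emulates a VM exit from L2 to
 * L1 (nested_vmx_vmexit() with a synthetic exit_reason of -1) and then
 * drops all nested state via free_nested(). vmx_set_nested_state()
 * below also uses this before loading new nested state.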
5354 */ 5355 void vmx_leave_nested(struct kvm_vcpu *vcpu) 5356 { 5357 if (is_guest_mode(vcpu)) { 5358 to_vmx(vcpu)->nested.nested_run_pending = 0; 5359 nested_vmx_vmexit(vcpu, -1, 0, 0); 5360 } 5361 free_nested(vcpu); 5362 } 5363 5364 static int vmx_set_nested_state(struct kvm_vcpu *vcpu, 5365 struct kvm_nested_state __user *user_kvm_nested_state, 5366 struct kvm_nested_state *kvm_state) 5367 { 5368 struct vcpu_vmx *vmx = to_vmx(vcpu); 5369 struct vmcs12 *vmcs12; 5370 u32 exit_qual; 5371 int ret; 5372 5373 if (kvm_state->format != 0) 5374 return -EINVAL; 5375 5376 if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) 5377 nested_enable_evmcs(vcpu, NULL); 5378 5379 if (!nested_vmx_allowed(vcpu)) 5380 return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL; 5381 5382 if (kvm_state->vmx.vmxon_pa == -1ull) { 5383 if (kvm_state->vmx.smm.flags) 5384 return -EINVAL; 5385 5386 if (kvm_state->vmx.vmcs_pa != -1ull) 5387 return -EINVAL; 5388 5389 vmx_leave_nested(vcpu); 5390 return 0; 5391 } 5392 5393 if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa)) 5394 return -EINVAL; 5395 5396 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 5397 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 5398 return -EINVAL; 5399 5400 if (kvm_state->vmx.smm.flags & 5401 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) 5402 return -EINVAL; 5403 5404 /* 5405 * SMM temporarily disables VMX, so we cannot be in guest mode, 5406 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags 5407 * must be zero. 5408 */ 5409 if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags) 5410 return -EINVAL; 5411 5412 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 5413 !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) 5414 return -EINVAL; 5415 5416 vmx_leave_nested(vcpu); 5417 if (kvm_state->vmx.vmxon_pa == -1ull) 5418 return 0; 5419 5420 vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa; 5421 ret = enter_vmx_operation(vcpu); 5422 if (ret) 5423 return ret; 5424 5425 /* Empty 'VMXON' state is permitted */ 5426 if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) 5427 return 0; 5428 5429 if (kvm_state->vmx.vmcs_pa != -1ull) { 5430 if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || 5431 !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) 5432 return -EINVAL; 5433 5434 set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); 5435 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { 5436 /* 5437 * Sync eVMCS upon entry as we may not have 5438 * HV_X64_MSR_VP_ASSIST_PAGE set up yet. 
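 * (Per the comment in vmx_get_nested_state() above, need_vmcs12_sync
 * means the vmcs12 loaded below is the authoritative copy that still
 * has to be propagated to the enlightened VMCS once it can be mapped.)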
5439 */ 5440 vmx->nested.need_vmcs12_sync = true; 5441 } else { 5442 return -EINVAL; 5443 } 5444 5445 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { 5446 vmx->nested.smm.vmxon = true; 5447 vmx->nested.vmxon = false; 5448 5449 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) 5450 vmx->nested.smm.guest_mode = true; 5451 } 5452 5453 vmcs12 = get_vmcs12(vcpu); 5454 if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12))) 5455 return -EFAULT; 5456 5457 if (vmcs12->hdr.revision_id != VMCS12_REVISION) 5458 return -EINVAL; 5459 5460 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 5461 return 0; 5462 5463 vmx->nested.nested_run_pending = 5464 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); 5465 5466 if (nested_cpu_has_shadow_vmcs(vmcs12) && 5467 vmcs12->vmcs_link_pointer != -1ull) { 5468 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 5469 5470 if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12)) 5471 return -EINVAL; 5472 5473 if (copy_from_user(shadow_vmcs12, 5474 user_kvm_nested_state->data + VMCS12_SIZE, 5475 sizeof(*vmcs12))) 5476 return -EFAULT; 5477 5478 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 5479 !shadow_vmcs12->hdr.shadow_vmcs) 5480 return -EINVAL; 5481 } 5482 5483 if (nested_vmx_check_vmentry_prereqs(vcpu, vmcs12) || 5484 nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) 5485 return -EINVAL; 5486 5487 vmx->nested.dirty_vmcs12 = true; 5488 ret = nested_vmx_enter_non_root_mode(vcpu, false); 5489 if (ret) 5490 return -EINVAL; 5491 5492 return 0; 5493 } 5494 5495 void nested_vmx_vcpu_setup(void) 5496 { 5497 if (enable_shadow_vmcs) { 5498 /* 5499 * At vCPU creation, "VMWRITE to any supported field 5500 * in the VMCS" is supported, so use the more 5501 * permissive vmx_vmread_bitmap to specify both read 5502 * and write permissions for the shadow VMCS. 5503 */ 5504 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); 5505 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap)); 5506 } 5507 } 5508 5509 /* 5510 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be 5511 * returned for the various VMX controls MSRs when nested VMX is enabled. 5512 * The same values should also be used to verify that vmcs12 control fields are 5513 * valid during nested entry from L1 to L2. 5514 * Each of these control msrs has a low and high 32-bit half: A low bit is on 5515 * if the corresponding bit in the (32-bit) control field *must* be on, and a 5516 * bit in the high half is on if the corresponding bit in the control field 5517 * may be on. See also vmx_control_verify(). 5518 */ 5519 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, 5520 bool apicv) 5521 { 5522 /* 5523 * Note that as a general rule, the high half of the MSRs (bits in 5524 * the control fields which may be 1) should be initialized by the 5525 * intersection of the underlying hardware's MSR (i.e., features which 5526 * can be supported) and the list of features we want to expose - 5527 * because they are known to be properly supported in our code. 5528 * Also, usually, the low half of the MSRs (bits which must be 1) can 5529 * be set to 0, meaning that L1 may turn off any of these bits. The 5530 * reason is that if one of these bits is necessary, it will appear 5531 * in vmcs01, and prepare_vmcs02, which bitwise-ORs the control 5532 * fields of vmcs01 and vmcs12, will turn these bits on - and 5533 * nested_vmx_exit_reflected() will not pass related exits to L1.
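 *
 * Put differently, it is always safe for prepare_vmcs02() to run L2 with
 * a superset of the controls L1 requested, roughly:
 *
 *     exec_control  = <the controls L0 itself needs, i.e. vmcs01's value>;
 *     exec_control |= vmcs12->cpu_based_vm_exec_control;
 *
 * as long as L0 consumes the extra exits itself instead of reflecting
 * them to L1.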
5534 * These rules have exceptions below. 5535 */ 5536 5537 /* pin-based controls */ 5538 rdmsr(MSR_IA32_VMX_PINBASED_CTLS, 5539 msrs->pinbased_ctls_low, 5540 msrs->pinbased_ctls_high); 5541 msrs->pinbased_ctls_low |= 5542 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 5543 msrs->pinbased_ctls_high &= 5544 PIN_BASED_EXT_INTR_MASK | 5545 PIN_BASED_NMI_EXITING | 5546 PIN_BASED_VIRTUAL_NMIS | 5547 (apicv ? PIN_BASED_POSTED_INTR : 0); 5548 msrs->pinbased_ctls_high |= 5549 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 5550 PIN_BASED_VMX_PREEMPTION_TIMER; 5551 5552 /* exit controls */ 5553 rdmsr(MSR_IA32_VMX_EXIT_CTLS, 5554 msrs->exit_ctls_low, 5555 msrs->exit_ctls_high); 5556 msrs->exit_ctls_low = 5557 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 5558 5559 msrs->exit_ctls_high &= 5560 #ifdef CONFIG_X86_64 5561 VM_EXIT_HOST_ADDR_SPACE_SIZE | 5562 #endif 5563 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; 5564 msrs->exit_ctls_high |= 5565 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | 5566 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 5567 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; 5568 5569 /* We support free control of debug control saving. */ 5570 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; 5571 5572 /* entry controls */ 5573 rdmsr(MSR_IA32_VMX_ENTRY_CTLS, 5574 msrs->entry_ctls_low, 5575 msrs->entry_ctls_high); 5576 msrs->entry_ctls_low = 5577 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 5578 msrs->entry_ctls_high &= 5579 #ifdef CONFIG_X86_64 5580 VM_ENTRY_IA32E_MODE | 5581 #endif 5582 VM_ENTRY_LOAD_IA32_PAT; 5583 msrs->entry_ctls_high |= 5584 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); 5585 5586 /* We support free control of debug control loading. */ 5587 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; 5588 5589 /* cpu-based controls */ 5590 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, 5591 msrs->procbased_ctls_low, 5592 msrs->procbased_ctls_high); 5593 msrs->procbased_ctls_low = 5594 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 5595 msrs->procbased_ctls_high &= 5596 CPU_BASED_VIRTUAL_INTR_PENDING | 5597 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | 5598 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | 5599 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | 5600 CPU_BASED_CR3_STORE_EXITING | 5601 #ifdef CONFIG_X86_64 5602 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | 5603 #endif 5604 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | 5605 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | 5606 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | 5607 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | 5608 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 5609 /* 5610 * We can allow some features even when not supported by the 5611 * hardware. For example, L1 can specify an MSR bitmap - and we 5612 * can use it to avoid exits to L1 - even when L0 runs L2 5613 * without MSR bitmaps. 5614 */ 5615 msrs->procbased_ctls_high |= 5616 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 5617 CPU_BASED_USE_MSR_BITMAPS; 5618 5619 /* We support free control of CR3 access interception. */ 5620 msrs->procbased_ctls_low &= 5621 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); 5622 5623 /* 5624 * secondary cpu-based controls. Do not include those that 5625 * depend on CPUID bits, they are added later by vmx_cpuid_update. 
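 * (For example, controls such as SECONDARY_EXEC_XSAVES,
 * SECONDARY_EXEC_RDTSCP or SECONDARY_EXEC_ENABLE_INVPCID are only
 * advertised to L1 once the matching guest CPUID bits are set.)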
5626 */ 5627 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) 5628 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 5629 msrs->secondary_ctls_low, 5630 msrs->secondary_ctls_high); 5631 5632 msrs->secondary_ctls_low = 0; 5633 msrs->secondary_ctls_high &= 5634 SECONDARY_EXEC_DESC | 5635 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 5636 SECONDARY_EXEC_APIC_REGISTER_VIRT | 5637 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 5638 SECONDARY_EXEC_WBINVD_EXITING; 5639 5640 /* 5641 * We can emulate "VMCS shadowing," even if the hardware 5642 * doesn't support it. 5643 */ 5644 msrs->secondary_ctls_high |= 5645 SECONDARY_EXEC_SHADOW_VMCS; 5646 5647 if (enable_ept) { 5648 /* nested EPT: emulate EPT also to L1 */ 5649 msrs->secondary_ctls_high |= 5650 SECONDARY_EXEC_ENABLE_EPT; 5651 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT | 5652 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT; 5653 if (cpu_has_vmx_ept_execute_only()) 5654 msrs->ept_caps |= 5655 VMX_EPT_EXECUTE_ONLY_BIT; 5656 msrs->ept_caps &= ept_caps; 5657 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | 5658 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | 5659 VMX_EPT_1GB_PAGE_BIT; 5660 if (enable_ept_ad_bits) { 5661 msrs->secondary_ctls_high |= 5662 SECONDARY_EXEC_ENABLE_PML; 5663 msrs->ept_caps |= VMX_EPT_AD_BIT; 5664 } 5665 } 5666 5667 if (cpu_has_vmx_vmfunc()) { 5668 msrs->secondary_ctls_high |= 5669 SECONDARY_EXEC_ENABLE_VMFUNC; 5670 /* 5671 * Advertise EPTP switching unconditionally 5672 * since we emulate it 5673 */ 5674 if (enable_ept) 5675 msrs->vmfunc_controls = 5676 VMX_VMFUNC_EPTP_SWITCHING; 5677 } 5678 5679 /* 5680 * Old versions of KVM use the single-context version without 5681 * checking for support, so declare that it is supported even 5682 * though it is treated as global context. The alternative is 5683 * not failing the single-context invvpid, and it is worse. 5684 */ 5685 if (enable_vpid) { 5686 msrs->secondary_ctls_high |= 5687 SECONDARY_EXEC_ENABLE_VPID; 5688 msrs->vpid_caps = VMX_VPID_INVVPID_BIT | 5689 VMX_VPID_EXTENT_SUPPORTED_MASK; 5690 } 5691 5692 if (enable_unrestricted_guest) 5693 msrs->secondary_ctls_high |= 5694 SECONDARY_EXEC_UNRESTRICTED_GUEST; 5695 5696 if (flexpriority_enabled) 5697 msrs->secondary_ctls_high |= 5698 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 5699 5700 /* miscellaneous data */ 5701 rdmsr(MSR_IA32_VMX_MISC, 5702 msrs->misc_low, 5703 msrs->misc_high); 5704 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA; 5705 msrs->misc_low |= 5706 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | 5707 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | 5708 VMX_MISC_ACTIVITY_HLT; 5709 msrs->misc_high = 0; 5710 5711 /* 5712 * This MSR reports some information about VMX support. We 5713 * should return information about the VMX we emulate for the 5714 * guest, and the VMCS structure we give it - not about the 5715 * VMX support of the underlying hardware. 5716 */ 5717 msrs->basic = 5718 VMCS12_REVISION | 5719 VMX_BASIC_TRUE_CTLS | 5720 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | 5721 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); 5722 5723 if (cpu_has_vmx_basic_inout()) 5724 msrs->basic |= VMX_BASIC_INOUT; 5725 5726 /* 5727 * These MSRs specify bits which the guest must keep fixed on 5728 * while L1 is in VMXON mode (in L1's root mode, or running an L2). 5729 * We picked the standard core2 setting. 
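 * In practice this means CR0.PE, CR0.PG and CR0.NE, plus CR4.VMXE, as
 * encoded in VMXON_CR0_ALWAYSON and VMXON_CR4_ALWAYSON below.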
5730 */ 5731 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) 5732 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE 5733 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; 5734 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; 5735 5736 /* These MSRs specify bits which the guest must keep fixed off (bits that are clear in these FIXED1 MSRs must stay clear in CR0/CR4). */ 5737 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); 5738 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); 5739 5740 /* highest index: VMX_PREEMPTION_TIMER_VALUE */ 5741 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1; 5742 } 5743 5744 void nested_vmx_hardware_unsetup(void) 5745 { 5746 int i; 5747 5748 if (enable_shadow_vmcs) { 5749 for (i = 0; i < VMX_BITMAP_NR; i++) 5750 free_page((unsigned long)vmx_bitmap[i]); 5751 } 5752 } 5753 5754 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) 5755 { 5756 int i; 5757 5758 /* 5759 * Without EPT it is not possible to restore L1's CR3 and PDPTR on 5760 * VMfail, because they are not available in vmcs01. Just always 5761 * use hardware checks. 5762 */ 5763 if (!enable_ept) 5764 nested_early_check = 1; 5765 5766 if (!cpu_has_vmx_shadow_vmcs()) 5767 enable_shadow_vmcs = 0; 5768 if (enable_shadow_vmcs) { 5769 for (i = 0; i < VMX_BITMAP_NR; i++) { 5770 /* 5771 * The vmx_bitmap is not tied to a VM and so should 5772 * not be charged to a memcg. 5773 */ 5774 vmx_bitmap[i] = (unsigned long *) 5775 __get_free_page(GFP_KERNEL); 5776 if (!vmx_bitmap[i]) { 5777 nested_vmx_hardware_unsetup(); 5778 return -ENOMEM; 5779 } 5780 } 5781 5782 init_vmcs_shadow_fields(); 5783 } 5784 5785 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear; 5786 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch; 5787 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld; 5788 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst; 5789 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread; 5790 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume; 5791 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite; 5792 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff; 5793 exit_handlers[EXIT_REASON_VMON] = handle_vmon; 5794 exit_handlers[EXIT_REASON_INVEPT] = handle_invept; 5795 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid; 5796 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc; 5797 5798 kvm_x86_ops->check_nested_events = vmx_check_nested_events; 5799 kvm_x86_ops->get_nested_state = vmx_get_nested_state; 5800 kvm_x86_ops->set_nested_state = vmx_set_nested_state; 5801 kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages; 5802 kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs; 5803 kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version; 5804 5805 return 0; 5806 } 5807