// SPDX-License-Identifier: GPL-2.0

#include <linux/frame.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "trace.h"
#include "x86.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap	(vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap	(vmx_bitmap[VMX_VMWRITE_BITMAP])

static u16 shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x) x,
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static u16 shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x) x,
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		u16 field = shadow_read_only_fields[i];

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
#ifdef CONFIG_X86_64
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_only_fields[j] = field;
		j++;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		u16 field = shadow_read_write_fields[i];

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
#ifdef CONFIG_X86_64
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_write_fields[j] = field;
		j++;
	}
	max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
		return nested_vmx_failInvalid(vcpu);

	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed
	 */
	return kvm_skip_emulated_instruction(vcpu);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: do not simply reset the guest here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
}

static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.hv_evmcs)
		return;

	kunmap(vmx->nested.hv_evmcs_page);
	kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
	vmx->nested.hv_evmcs_vmptr = -1ull;
	vmx->nested.hv_evmcs_page = NULL;
	vmx->nested.hv_evmcs = NULL;
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	kfree(vmx->nested.cached_shadow_vmcs12);
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_dirty(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	if (vmx->nested.virtual_apic_page) {
		kvm_release_page_dirty(vmx->nested.virtual_apic_page);
		vmx->nested.virtual_apic_page = NULL;
	}
	if (vmx->nested.pi_desc_page) {
		kunmap(vmx->nested.pi_desc_page);
		kvm_release_page_dirty(vmx->nested.pi_desc_page);
		vmx->nested.pi_desc_page = NULL;
		vmx->nested.pi_desc = NULL;
	}

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}

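/*
 * Make @vmcs the vCPU's active VMCS: unload the previous VMCS from this
 * physical CPU, load the new one, and drop the control shadows and
 * segment cache that described the old VMCS.
 */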
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	vmx_vcpu_put(vcpu);
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load(vcpu, cpu);
	put_cpu();

	vm_entry_controls_reset_shadow(vmx);
	vm_exit_controls_reset_shadow(vmx);
	vmx_segment_cache_clear(vmx);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
	free_nested(vcpu);
	vcpu_put(vcpu);
}

static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else if (fault->error_code & PFERR_RSVD_MASK)
		exit_reason = EXIT_REASON_EPT_MISCONFIG;
	else
		exit_reason = EXIT_REASON_EPT_VIOLATION;

	nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_ept_mmu(vcpu,
			to_vmx(vcpu)->nested.msrs.ept_caps &
			VMX_EPT_EXECUTE_ONLY_BIT,
			nested_ept_ad_enabled(vcpu),
			nested_ept_get_cr3(vcpu));
	vcpu->arch.mmu->set_cr3           = vmx_set_cr3;
	vcpu->arch.mmu->get_cr3           = nested_ept_get_cr3;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}


/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether, in a nested guest, they need to be injected into L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~(DR6_FIXED_1 | DR6_BT);
				payload ^= DR6_RTM;
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}

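/*
 * Forward a page fault that occurred while L2 was running: reflect it to
 * L1 as an EXCEPTION_NMI vmexit if L1 intercepts this page fault (and no
 * nested VM-entry is pending), otherwise inject it directly into L2.
 */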
static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
	    !to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
	    !page_address_valid(vcpu, vmcs12->io_bitmap_b))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
		return -EINVAL;

	return 0;
}

/*
 * Check if MSR is intercepted for L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}

/*
 * If an MSR is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_nested,
						 u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
			/* read-low */
			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
			/* write-low */
			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
			/* read-high */
			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
			/* write-high */
			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);

	}
}

/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	struct page *page;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	/*
	 * pred_cmd & spec_ctrl are trying to verify two things:
	 *
	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessary merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
	bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !pred_cmd && !spec_ctrl)
		return false;

	page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
	if (is_error_page(page))
		return false;

	msr_bitmap_l1 = (unsigned long *)kmap(page);
	if (nested_cpu_has_apic_reg_virt(vmcs12)) {
		/*
		 * L0 need not intercept reads for MSRs between 0x800 and
		 * 0x8ff; it just lets the processor take the value from the
		 * virtual-APIC page; take those 256 bits directly from the
		 * L1 bitmap.
		 */
		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
			unsigned word = msr / BITS_PER_LONG;
			msr_bitmap_l0[word] = msr_bitmap_l1[word];
			msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
		}
	} else {
		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
			unsigned word = msr / BITS_PER_LONG;
			msr_bitmap_l0[word] = ~0;
			msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
		}
	}

	nested_vmx_disable_intercept_for_msr(
		msr_bitmap_l1, msr_bitmap_l0,
		X2APIC_MSR(APIC_TASKPRI),
		MSR_TYPE_W);

	if (nested_cpu_has_vid(vmcs12)) {
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_EOI),
			MSR_TYPE_W);
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_SELF_IPI),
			MSR_TYPE_W);
	}

	if (spec_ctrl)
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			MSR_IA32_SPEC_CTRL,
			MSR_TYPE_R | MSR_TYPE_W);

	if (pred_cmd)
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			MSR_IA32_PRED_CMD,
			MSR_TYPE_W);

	kunmap(page);
	kvm_release_page_clean(page);

	return true;
}

static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vmcs12 *shadow;
	struct page *page;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);
	page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);

	memcpy(shadow, kmap(page), VMCS12_SIZE);

	kunmap(page);
	kvm_release_page_clean(page);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    !page_address_valid(vcpu, vmcs12->apic_access_addr))
		return -EINVAL;
	else
		return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (nested_cpu_has_vid(vmcs12) &&
	    !nested_exit_on_intr(vcpu))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has already been checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	    (!nested_cpu_has_vid(vmcs12) ||
	     !nested_exit_intr_ack_set(vcpu) ||
	     (vmcs12->posted_intr_nv & 0xff00) ||
	     (vmcs12->posted_intr_desc_addr & 0x3f) ||
	     (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return -EINVAL;

	return 0;
}

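/*
 * Validate the address/count of a VM-entry or VM-exit MSR switch area: the
 * list must be 16-byte aligned and must fit entirely below the guest's
 * maximum physical address.
 */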
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	int maxphyaddr;

	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
					vmcs12->vm_exit_msr_load_addr) ||
	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
					vmcs12->vm_exit_msr_store_addr))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						       struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
					vmcs12->vm_entry_msr_load_addr))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (!nested_cpu_has_ept(vmcs12) ||
	    !page_address_valid(vcpu, vmcs12->pml_address))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							  struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
	    !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
		return -EINVAL;

	return 0;
}

static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
		return -EINVAL;
	if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
	    e->index == MSR_IA32_UCODE_REV)
		return -EINVAL;
	if (e->reserved != 0)
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (e->index == MSR_FS_BASE ||
	    e->index == MSR_GS_BASE ||
	    e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

/*
 * Load guest's/host's MSRs at nested entry/exit.
 * Return 0 for success, entry index for failure.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	struct msr_data msr;

	msr.host_initiated = false;
	for (i = 0; i < count; i++) {
		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		msr.index = e.index;
		msr.data = e.value;
		if (kvm_set_msr(vcpu, &msr)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	return i + 1;
}

static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;

	for (i = 0; i < count; i++) {
		struct msr_data msr_info;
		if (kvm_vcpu_read_guest(vcpu,
					gpa + i * sizeof(e),
					&e, 2 * sizeof(u32))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			return -EINVAL;
		}
		if (nested_vmx_store_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			return -EINVAL;
		}
		msr_info.host_initiated = false;
		msr_info.index = e.index;
		if (kvm_get_msr(vcpu, &msr_info)) {
			pr_debug_ratelimited(
				"%s cannot read MSR (%u, 0x%x)\n",
				__func__, i, e.index);
			return -EINVAL;
		}
		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
						offsetof(struct vmx_msr_entry, value),
					 &msr_info.data, sizeof(msr_info.data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, msr_info.data);
			return -EINVAL;
		}
	}
	return 0;
}

static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long invalid_mask;

	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	return (val & invalid_mask) == 0;
}

/*
 * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
 * emulating VM entry into a guest with EPT enabled.
 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
 * is assigned to entry_failure_code on failure.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
			       u32 *entry_failure_code)
{
	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
		if (!nested_cr3_valid(vcpu, cr3)) {
			*entry_failure_code = ENTRY_FAIL_DEFAULT;
			return 1;
		}

		/*
		 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
		 * must not be dereferenced.
		 */
		if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
		    !nested_ept) {
			if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
				*entry_failure_code = ENTRY_FAIL_PDPTE;
				return 1;
			}
		}
	}

	if (!nested_ept)
		kvm_mmu_new_cr3(vcpu, cr3, false);

	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);

	kvm_init_mmu(vcpu, false);

	return 0;
}

/*
 * Returns whether KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated by L1.
 *
 * If L1 uses EPT, then TLB entries are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return nested_cpu_has_ept(vmcs12) ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}


static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}

static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}

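/*
 * Restore a "true" or secondary VMX control MSR written by userspace. The
 * restored value may only be more restrictive than what KVM supports:
 * every must-be-1 bit must remain 1 and every must-be-0 bit must remain 0.
 */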
static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	/*
	 * If L1 has read-only VM-exit information fields, use the
	 * less permissive vmx_vmwrite_bitmap to specify write
	 * permissions for the shadow VMCS.
	 */
	if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));

	return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicate bits which "must be 1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}

/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}

/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case
 * they have been modified by the L1 guest. Note that the "read-only"
 * VM-exit information fields are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS."
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	const u16 *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	int i, q;
	unsigned long field;
	u64 field_value;
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			field_value = __vmcs_readl(field);
			vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
		}
		/*
		 * Skip the VM-exit information fields if they are read-only.
		 */
		if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
			break;
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}

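/*
 * Copy the cached vmcs12 values into the shadow VMCS so that the CPU can
 * satisfy L1's VMREAD/VMWRITE of shadowed fields without a VM-exit.
 */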
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
	const u16 *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	int i, q;
	unsigned long field;
	u64 field_value = 0;
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
			__vmcs_writel(field, field_value);
		}
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);
}

static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
	vmcs12->tpr_threshold = evmcs->tpr_threshold;
	vmcs12->guest_rip = evmcs->guest_rip;

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
		vmcs12->guest_rsp = evmcs->guest_rsp;
		vmcs12->guest_rflags = evmcs->guest_rflags;
		vmcs12->guest_interruptibility_info =
			evmcs->guest_interruptibility_info;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->cpu_based_vm_exec_control =
			evmcs->cpu_based_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->exception_bitmap = evmcs->exception_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
		vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
		vmcs12->vm_entry_intr_info_field =
			evmcs->vm_entry_intr_info_field;
		vmcs12->vm_entry_exception_error_code =
			evmcs->vm_entry_exception_error_code;
		vmcs12->vm_entry_instruction_len =
			evmcs->vm_entry_instruction_len;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
		vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
		vmcs12->host_cr0 = evmcs->host_cr0;
		vmcs12->host_cr3 = evmcs->host_cr3;
		vmcs12->host_cr4 = evmcs->host_cr4;
		vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
		vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
		vmcs12->host_rip = evmcs->host_rip;
		vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
		vmcs12->host_es_selector = evmcs->host_es_selector;
		vmcs12->host_cs_selector = evmcs->host_cs_selector;
		vmcs12->host_ss_selector = evmcs->host_ss_selector;
		vmcs12->host_ds_selector = evmcs->host_ds_selector;
		vmcs12->host_fs_selector = evmcs->host_fs_selector;
		vmcs12->host_gs_selector = evmcs->host_gs_selector;
		vmcs12->host_tr_selector = evmcs->host_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
		vmcs12->secondary_vm_exec_control =
			evmcs->secondary_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
		vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
		vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
		vmcs12->msr_bitmap = evmcs->msr_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
		vmcs12->guest_es_base = evmcs->guest_es_base;
		vmcs12->guest_cs_base = evmcs->guest_cs_base;
		vmcs12->guest_ss_base = evmcs->guest_ss_base;
		vmcs12->guest_ds_base = evmcs->guest_ds_base;
		vmcs12->guest_fs_base = evmcs->guest_fs_base;
		vmcs12->guest_gs_base = evmcs->guest_gs_base;
		vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
		vmcs12->guest_tr_base = evmcs->guest_tr_base;
		vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
		vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
		vmcs12->guest_es_limit = evmcs->guest_es_limit;
		vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
		vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
		vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
		vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
		vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
		vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
		vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
		vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
		vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
		vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
		vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
		vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
		vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
		vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
		vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
		vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
		vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
		vmcs12->guest_es_selector = evmcs->guest_es_selector;
		vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
		vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
		vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
		vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
		vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
		vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
		vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
		vmcs12->tsc_offset = evmcs->tsc_offset;
		vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
		vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
		vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
		vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
		vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
		vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
		vmcs12->guest_cr0 = evmcs->guest_cr0;
		vmcs12->guest_cr3 = evmcs->guest_cr3;
		vmcs12->guest_cr4 = evmcs->guest_cr4;
		vmcs12->guest_dr7 = evmcs->guest_dr7;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
		vmcs12->host_fs_base = evmcs->host_fs_base;
		vmcs12->host_gs_base = evmcs->host_gs_base;
		vmcs12->host_tr_base = evmcs->host_tr_base;
		vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
		vmcs12->host_idtr_base = evmcs->host_idtr_base;
		vmcs12->host_rsp = evmcs->host_rsp;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
		vmcs12->ept_pointer = evmcs->ept_pointer;
		vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
		vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
		vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
		vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
		vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
		vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
		vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
		vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
		vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
		vmcs12->guest_pending_dbg_exceptions =
			evmcs->guest_pending_dbg_exceptions;
		vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
		vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
		vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
		vmcs12->guest_activity_state = evmcs->guest_activity_state;
		vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
	}

	/*
	 * Not used?
	 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
	 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
	 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
	 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
	 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
	 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
	 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
	 * vmcs12->page_fault_error_code_mask =
	 *		evmcs->page_fault_error_code_mask;
	 * vmcs12->page_fault_error_code_match =
	 *		evmcs->page_fault_error_code_match;
	 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
	 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
	 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
	 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
	 */

	/*
	 * Read only fields:
	 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
	 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
	 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
	 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
	 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
	 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
	 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
	 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
	 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
	 * vmcs12->exit_qualification = evmcs->exit_qualification;
	 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
	 *
	 * Not present in struct vmcs12:
	 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
	 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
	 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
	 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
	 */

	return 0;
}

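/*
 * Mirror the cached vmcs12 back into the enlightened VMCS that L1 reads.
 * Host state and the other fields KVM never modifies are deliberately
 * skipped; see the comment at the top of the function.
 */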
static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/*
	 * Should not be changed by KVM:
	 *
	 * evmcs->host_es_selector = vmcs12->host_es_selector;
	 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
	 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
	 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
	 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
	 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
	 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
	 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
	 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
	 * evmcs->host_cr0 = vmcs12->host_cr0;
	 * evmcs->host_cr3 = vmcs12->host_cr3;
	 * evmcs->host_cr4 = vmcs12->host_cr4;
	 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
	 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
	 * evmcs->host_rip = vmcs12->host_rip;
	 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
	 * evmcs->host_fs_base = vmcs12->host_fs_base;
	 * evmcs->host_gs_base = vmcs12->host_gs_base;
	 * evmcs->host_tr_base = vmcs12->host_tr_base;
	 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
	 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
	 * evmcs->host_rsp = vmcs12->host_rsp;
	 * sync_vmcs12() doesn't read these:
	 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
	 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
	 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
	 * evmcs->ept_pointer = vmcs12->ept_pointer;
	 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
	 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
	 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
	 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
	 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
	 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
	 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
	 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
	 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
	 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
	 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
	 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
	 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
	 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
	 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
	 * evmcs->page_fault_error_code_mask =
	 *		vmcs12->page_fault_error_code_mask;
	 * evmcs->page_fault_error_code_match =
	 *		vmcs12->page_fault_error_code_match;
	 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
	 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
	 * evmcs->tsc_offset = vmcs12->tsc_offset;
	 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
	 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
	 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
	 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
	 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
	 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
	 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
	 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
	 *
	 * Not present in struct vmcs12:
	 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
	 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
	 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
	 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
	 */

	evmcs->guest_es_selector = vmcs12->guest_es_selector;
	evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
	evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
	evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
	evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
	evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
	evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
	evmcs->guest_tr_selector = vmcs12->guest_tr_selector;

	evmcs->guest_es_limit = vmcs12->guest_es_limit;
	evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
	evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
	evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
	evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
	evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
	evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
	evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
	evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
	evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;

	evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
	evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
	evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
	evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
	evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
	evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
	evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
	evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;

	evmcs->guest_es_base = vmcs12->guest_es_base;
	evmcs->guest_cs_base = vmcs12->guest_cs_base;
	evmcs->guest_ss_base = vmcs12->guest_ss_base;
	evmcs->guest_ds_base = vmcs12->guest_ds_base;
	evmcs->guest_fs_base = vmcs12->guest_fs_base;
	evmcs->guest_gs_base = vmcs12->guest_gs_base;
	evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
	evmcs->guest_tr_base = vmcs12->guest_tr_base;
	evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
	evmcs->guest_idtr_base = vmcs12->guest_idtr_base;

	evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
	evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;

	evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
	evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
	evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
	evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;

	evmcs->guest_pending_dbg_exceptions =
		vmcs12->guest_pending_dbg_exceptions;
	evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
	evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;

	evmcs->guest_activity_state = vmcs12->guest_activity_state;
	evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;

	evmcs->guest_cr0 = vmcs12->guest_cr0;
	evmcs->guest_cr3 = vmcs12->guest_cr3;
	evmcs->guest_cr4 = vmcs12->guest_cr4;
	evmcs->guest_dr7 = vmcs12->guest_dr7;

	evmcs->guest_physical_address = vmcs12->guest_physical_address;

	evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
	evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
	evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
	evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
	evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
	evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
	evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
	evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;

	evmcs->exit_qualification = vmcs12->exit_qualification;

	evmcs->guest_linear_address = vmcs12->guest_linear_address;
	evmcs->guest_rsp = vmcs12->guest_rsp;
	evmcs->guest_rflags = vmcs12->guest_rflags;

	evmcs->guest_interruptibility_info =
		vmcs12->guest_interruptibility_info;
	evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
	evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
	evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
	evmcs->vm_entry_exception_error_code =
		vmcs12->vm_entry_exception_error_code;
	evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;

	evmcs->guest_rip = vmcs12->guest_rip;

	evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;

	return 0;
}

/*
 * This is the equivalent of the nested hypervisor executing the vmptrld
 * instruction.
 */
static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
						 bool from_launch)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct hv_vp_assist_page assist_page;

	if (likely(!vmx->nested.enlightened_vmcs_enabled))
		return 1;

	if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
		return 1;

	if (unlikely(!assist_page.enlighten_vmentry))
		return 1;

	if (unlikely(assist_page.current_nested_vmcs !=
		     vmx->nested.hv_evmcs_vmptr)) {

		if (!vmx->nested.hv_evmcs)
			vmx->nested.current_vmptr = -1ull;

		nested_release_evmcs(vcpu);

		vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
			vcpu, assist_page.current_nested_vmcs);

		if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
			return 0;

		vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);

		/*
		 * Currently, KVM only supports eVMCS version 1
		 * (== KVM_EVMCS_VERSION) and thus we expect guest to set this
		 * value to first u32 field of eVMCS which should specify eVMCS
		 * VersionNumber.
		 *
		 * Guest should be aware of supported eVMCS versions by host by
		 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is
		 * expected to set this CPUID leaf according to the value
		 * returned in vmcs_version from nested_enable_evmcs().
		 *
		 * However, it turns out that Microsoft Hyper-V fails to comply
		 * with its own invented interface: when Hyper-V uses eVMCS, it
		 * just sets the first u32 field of the eVMCS to the revision_id
		 * specified in MSR_IA32_VMX_BASIC, instead of the eVMCS version
		 * number, which should be one of the supported versions
		 * specified in CPUID.0x4000000A.EAX[0:15].
		 *
		 * To work around this Hyper-V bug, we accept here either a
		 * supported eVMCS version or the VMCS12 revision_id as valid
		 * values for the first u32 field of the eVMCS.
		 */
		if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
		    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
			nested_release_evmcs(vcpu);
			return 0;
		}

		vmx->nested.dirty_vmcs12 = true;
		/*
		 * As we keep L2 state for one guest only, the 'hv_clean_fields'
		 * mask can't be used when we switch between them. Reset it here
		 * for simplicity.
		 */
		vmx->nested.hv_evmcs->hv_clean_fields &=
			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
		vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;

		/*
		 * Unlike normal vmcs12, enlightened vmcs12 is not fully
		 * reloaded from guest's memory (read only fields, fields not
		 * present in struct hv_enlightened_vmcs, ...). Make sure there
		 * are no leftovers.
		 */
		if (from_launch) {
			struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
			memset(vmcs12, 0, sizeof(*vmcs12));
			vmcs12->hdr.revision_id = VMCS12_REVISION;
		}

	}
	return 1;
}

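/*
 * Flush the state KVM has accumulated in the cached vmcs12 back to the
 * structure L1 actually reads: the enlightened VMCS when one is mapped,
 * otherwise the shadow VMCS.
 */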
Reset it here for 1820 * simplicity. 1821 */ 1822 vmx->nested.hv_evmcs->hv_clean_fields &= 1823 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 1824 vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs; 1825 1826 /* 1827 * Unlike normal vmcs12, enlightened vmcs12 is not fully 1828 * reloaded from guest's memory (read only fields, fields not 1829 * present in struct hv_enlightened_vmcs, ...). Make sure there 1830 * are no leftovers. 1831 */ 1832 if (from_launch) { 1833 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1834 memset(vmcs12, 0, sizeof(*vmcs12)); 1835 vmcs12->hdr.revision_id = VMCS12_REVISION; 1836 } 1837 1838 } 1839 return 1; 1840 } 1841 1842 void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu) 1843 { 1844 struct vcpu_vmx *vmx = to_vmx(vcpu); 1845 1846 /* 1847 * hv_evmcs may end up being not mapped after migration (when 1848 * L2 was running), map it here to make sure vmcs12 changes are 1849 * properly reflected. 1850 */ 1851 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) 1852 nested_vmx_handle_enlightened_vmptrld(vcpu, false); 1853 1854 if (vmx->nested.hv_evmcs) { 1855 copy_vmcs12_to_enlightened(vmx); 1856 /* All fields are clean */ 1857 vmx->nested.hv_evmcs->hv_clean_fields |= 1858 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 1859 } else { 1860 copy_vmcs12_to_shadow(vmx); 1861 } 1862 1863 vmx->nested.need_vmcs12_sync = false; 1864 } 1865 1866 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) 1867 { 1868 struct vcpu_vmx *vmx = 1869 container_of(timer, struct vcpu_vmx, nested.preemption_timer); 1870 1871 vmx->nested.preemption_timer_expired = true; 1872 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); 1873 kvm_vcpu_kick(&vmx->vcpu); 1874 1875 return HRTIMER_NORESTART; 1876 } 1877 1878 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) 1879 { 1880 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; 1881 struct vcpu_vmx *vmx = to_vmx(vcpu); 1882 1883 /* 1884 * A timer value of zero is architecturally guaranteed to cause 1885 * a VMExit prior to executing any instructions in the guest. 1886 */ 1887 if (preemption_timeout == 0) { 1888 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); 1889 return; 1890 } 1891 1892 if (vcpu->arch.virtual_tsc_khz == 0) 1893 return; 1894 1895 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 1896 preemption_timeout *= 1000000; 1897 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); 1898 hrtimer_start(&vmx->nested.preemption_timer, 1899 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); 1900 } 1901 1902 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 1903 { 1904 if (vmx->nested.nested_run_pending && 1905 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) 1906 return vmcs12->guest_ia32_efer; 1907 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) 1908 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); 1909 else 1910 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); 1911 } 1912 1913 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) 1914 { 1915 /* 1916 * If vmcs02 hasn't been initialized, set the constant vmcs02 state 1917 * according to L0's settings (vmcs12 is irrelevant here). Host 1918 * fields that come from L0 and are not constant, e.g. HOST_CR3, 1919 * will be set as needed prior to VMLAUNCH/VMRESUME. 
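	 *
	 * For example, the MSR bitmap address, the PML buffer address and the
	 * posted-interrupt notification vector written below never change for
	 * the lifetime of vmcs02; only state like that belongs here.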
1920 */ 1921 if (vmx->nested.vmcs02_initialized) 1922 return; 1923 vmx->nested.vmcs02_initialized = true; 1924 1925 /* 1926 * We don't care what the EPTP value is we just need to guarantee 1927 * it's valid so we don't get a false positive when doing early 1928 * consistency checks. 1929 */ 1930 if (enable_ept && nested_early_check) 1931 vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0)); 1932 1933 /* All VMFUNCs are currently emulated through L0 vmexits. */ 1934 if (cpu_has_vmx_vmfunc()) 1935 vmcs_write64(VM_FUNCTION_CONTROL, 0); 1936 1937 if (cpu_has_vmx_posted_intr()) 1938 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); 1939 1940 if (cpu_has_vmx_msr_bitmap()) 1941 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); 1942 1943 if (enable_pml) 1944 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); 1945 1946 /* 1947 * Set the MSR load/store lists to match L0's settings. Only the 1948 * addresses are constant (for vmcs02), the counts can change based 1949 * on L2's behavior, e.g. switching to/from long mode. 1950 */ 1951 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); 1952 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 1953 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 1954 1955 vmx_set_constant_host_state(vmx); 1956 } 1957 1958 static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx, 1959 struct vmcs12 *vmcs12) 1960 { 1961 prepare_vmcs02_constant_state(vmx); 1962 1963 vmcs_write64(VMCS_LINK_POINTER, -1ull); 1964 1965 if (enable_vpid) { 1966 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) 1967 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); 1968 else 1969 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 1970 } 1971 } 1972 1973 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 1974 { 1975 u32 exec_control, vmcs12_exec_ctrl; 1976 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); 1977 1978 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) 1979 prepare_vmcs02_early_full(vmx, vmcs12); 1980 1981 /* 1982 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before 1983 * entry, but only if the current (host) sp changed from the value 1984 * we wrote last (vmx->host_rsp). This cache is no longer relevant 1985 * if we switch vmcs, and rather than hold a separate cache per vmcs, 1986 * here we just force the write to happen on entry. host_rsp will 1987 * also be written unconditionally by nested_vmx_check_vmentry_hw() 1988 * if we are doing early consistency checks via hardware. 1989 */ 1990 vmx->host_rsp = 0; 1991 1992 /* 1993 * PIN CONTROLS 1994 */ 1995 exec_control = vmcs12->pin_based_vm_exec_control; 1996 1997 /* Preemption timer setting is computed directly in vmx_vcpu_run. */ 1998 exec_control |= vmcs_config.pin_based_exec_ctrl; 1999 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 2000 vmx->loaded_vmcs->hv_timer_armed = false; 2001 2002 /* Posted interrupts setting is only taken from vmcs12. 
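	 * If L1 did not enable posted interrupts, the PIN_BASED_POSTED_INTR
	 * bit inherited from L0's configuration is cleared below.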
*/ 2003 if (nested_cpu_has_posted_intr(vmcs12)) { 2004 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; 2005 vmx->nested.pi_pending = false; 2006 } else { 2007 exec_control &= ~PIN_BASED_POSTED_INTR; 2008 } 2009 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); 2010 2011 /* 2012 * EXEC CONTROLS 2013 */ 2014 exec_control = vmx_exec_control(vmx); /* L0's desires */ 2015 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; 2016 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; 2017 exec_control &= ~CPU_BASED_TPR_SHADOW; 2018 exec_control |= vmcs12->cpu_based_vm_exec_control; 2019 2020 /* 2021 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if 2022 * nested_get_vmcs12_pages can't fix it up, the illegal value 2023 * will result in a VM entry failure. 2024 */ 2025 if (exec_control & CPU_BASED_TPR_SHADOW) { 2026 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); 2027 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 2028 } else { 2029 #ifdef CONFIG_X86_64 2030 exec_control |= CPU_BASED_CR8_LOAD_EXITING | 2031 CPU_BASED_CR8_STORE_EXITING; 2032 #endif 2033 } 2034 2035 /* 2036 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed 2037 * for I/O port accesses. 2038 */ 2039 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 2040 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 2041 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); 2042 2043 /* 2044 * SECONDARY EXEC CONTROLS 2045 */ 2046 if (cpu_has_secondary_exec_ctrls()) { 2047 exec_control = vmx->secondary_exec_control; 2048 2049 /* Take the following fields only from vmcs12 */ 2050 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2051 SECONDARY_EXEC_ENABLE_INVPCID | 2052 SECONDARY_EXEC_RDTSCP | 2053 SECONDARY_EXEC_XSAVES | 2054 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2055 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2056 SECONDARY_EXEC_ENABLE_VMFUNC); 2057 if (nested_cpu_has(vmcs12, 2058 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { 2059 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & 2060 ~SECONDARY_EXEC_ENABLE_PML; 2061 exec_control |= vmcs12_exec_ctrl; 2062 } 2063 2064 /* VMCS shadowing for L2 is emulated for now */ 2065 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 2066 2067 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 2068 vmcs_write16(GUEST_INTR_STATUS, 2069 vmcs12->guest_intr_status); 2070 2071 /* 2072 * Write an illegal value to APIC_ACCESS_ADDR. Later, 2073 * nested_get_vmcs12_pages will either fix it up or 2074 * remove the VM execution control. 2075 */ 2076 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) 2077 vmcs_write64(APIC_ACCESS_ADDR, -1ull); 2078 2079 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING) 2080 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); 2081 2082 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); 2083 } 2084 2085 /* 2086 * ENTRY CONTROLS 2087 * 2088 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE 2089 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate 2090 * on the related bits (if supported by the CPU) in the hope that 2091 * we can avoid VMWrites during vmx_set_efer(). 
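	 *
	 * E.g. for a 64-bit L2 whose EFER differs from the host's, both
	 * VM_ENTRY_IA32E_MODE and VM_ENTRY_LOAD_IA32_EFER are speculatively
	 * set below (provided the CPU has the load-IA32_EFER controls), so
	 * that vmx_set_efer() later finds them already correct and can skip
	 * the VMWrites.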
2092 */ 2093 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) & 2094 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; 2095 if (cpu_has_load_ia32_efer()) { 2096 if (guest_efer & EFER_LMA) 2097 exec_control |= VM_ENTRY_IA32E_MODE; 2098 if (guest_efer != host_efer) 2099 exec_control |= VM_ENTRY_LOAD_IA32_EFER; 2100 } 2101 vm_entry_controls_init(vmx, exec_control); 2102 2103 /* 2104 * EXIT CONTROLS 2105 * 2106 * L2->L1 exit controls are emulated - the hardware exit is to L0 so 2107 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER 2108 * bits may be modified by vmx_set_efer() in prepare_vmcs02(). 2109 */ 2110 exec_control = vmx_vmexit_ctrl(); 2111 if (cpu_has_load_ia32_efer() && guest_efer != host_efer) 2112 exec_control |= VM_EXIT_LOAD_IA32_EFER; 2113 vm_exit_controls_init(vmx, exec_control); 2114 2115 /* 2116 * Conceptually we want to copy the PML address and index from 2117 * vmcs01 here, and then back to vmcs01 on nested vmexit. But, 2118 * since we always flush the log on each vmexit and never change 2119 * the PML address (once set), this happens to be equivalent to 2120 * simply resetting the index in vmcs02. 2121 */ 2122 if (enable_pml) 2123 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 2124 2125 /* 2126 * Interrupt/Exception Fields 2127 */ 2128 if (vmx->nested.nested_run_pending) { 2129 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2130 vmcs12->vm_entry_intr_info_field); 2131 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 2132 vmcs12->vm_entry_exception_error_code); 2133 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2134 vmcs12->vm_entry_instruction_len); 2135 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 2136 vmcs12->guest_interruptibility_info); 2137 vmx->loaded_vmcs->nmi_known_unmasked = 2138 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); 2139 } else { 2140 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 2141 } 2142 } 2143 2144 static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2145 { 2146 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2147 2148 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2149 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2150 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 2151 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 2152 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); 2153 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); 2154 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); 2155 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); 2156 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); 2157 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); 2158 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); 2159 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); 2160 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); 2161 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); 2162 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); 2163 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); 2164 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); 2165 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); 2166 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); 2167 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); 2168 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); 2169 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); 2170 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); 2171 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); 2172 
vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); 2173 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); 2174 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); 2175 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); 2176 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); 2177 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); 2178 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); 2179 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); 2180 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); 2181 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); 2182 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); 2183 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); 2184 } 2185 2186 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2187 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { 2188 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 2189 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 2190 vmcs12->guest_pending_dbg_exceptions); 2191 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); 2192 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); 2193 2194 /* 2195 * L1 may access the L2's PDPTR, so save them to construct 2196 * vmcs12 2197 */ 2198 if (enable_ept) { 2199 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2200 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2201 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2202 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2203 } 2204 } 2205 2206 if (nested_cpu_has_xsaves(vmcs12)) 2207 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); 2208 2209 /* 2210 * Whether page-faults are trapped is determined by a combination of 2211 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. 2212 * If enable_ept, L0 doesn't care about page faults and we should 2213 * set all of these to L1's desires. However, if !enable_ept, L0 does 2214 * care about (at least some) page faults, and because it is not easy 2215 * (if at all possible?) to merge L0 and L1's desires, we simply ask 2216 * to exit on each and every L2 page fault. This is done by setting 2217 * MASK=MATCH=0 and (see below) EB.PF=1. 2218 * Note that below we don't need special code to set EB.PF beyond the 2219 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, 2220 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when 2221 * !enable_ept, EB.PF is 1, so the "or" will always be 1. 2222 */ 2223 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 2224 enable_ept ? vmcs12->page_fault_error_code_mask : 0); 2225 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 2226 enable_ept ? vmcs12->page_fault_error_code_match : 0); 2227 2228 if (cpu_has_vmx_apicv()) { 2229 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); 2230 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); 2231 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); 2232 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); 2233 } 2234 2235 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 2236 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 2237 2238 set_cr4_guest_host_mask(vmx); 2239 2240 if (kvm_mpx_supported()) { 2241 if (vmx->nested.nested_run_pending && 2242 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 2243 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); 2244 else 2245 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); 2246 } 2247 } 2248 2249 /* 2250 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested 2251 * L2 guest. 
L1 has a vmcs for L2 (vmcs12), and this function "merges" it 2252 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 2253 * guest in a way that will both be appropriate to L1's requests, and our 2254 * needs. In addition to modifying the active vmcs (which is vmcs02), this 2255 * function also has additional necessary side-effects, like setting various 2256 * vcpu->arch fields. 2257 * Returns 0 on success, 1 on failure. Invalid state exit qualification code 2258 * is assigned to entry_failure_code on failure. 2259 */ 2260 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 2261 u32 *entry_failure_code) 2262 { 2263 struct vcpu_vmx *vmx = to_vmx(vcpu); 2264 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2265 2266 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) { 2267 prepare_vmcs02_full(vmx, vmcs12); 2268 vmx->nested.dirty_vmcs12 = false; 2269 } 2270 2271 /* 2272 * First, the fields that are shadowed. This must be kept in sync 2273 * with vmcs_shadow_fields.h. 2274 */ 2275 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2276 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2277 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); 2278 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); 2279 } 2280 2281 if (vmx->nested.nested_run_pending && 2282 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { 2283 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); 2284 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); 2285 } else { 2286 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); 2287 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); 2288 } 2289 vmx_set_rflags(vcpu, vmcs12->guest_rflags); 2290 2291 vmx->nested.preemption_timer_expired = false; 2292 if (nested_cpu_has_preemption_timer(vmcs12)) 2293 vmx_start_preemption_timer(vcpu); 2294 2295 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the 2296 * bitwise-or of what L1 wants to trap for L2, and what we want to 2297 * trap. Note that CR0.TS also needs updating - we do this later. 2298 */ 2299 update_exception_bitmap(vcpu); 2300 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; 2301 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); 2302 2303 if (vmx->nested.nested_run_pending && 2304 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { 2305 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); 2306 vcpu->arch.pat = vmcs12->guest_ia32_pat; 2307 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 2308 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 2309 } 2310 2311 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 2312 2313 if (kvm_has_tsc_control) 2314 decache_tsc_multiplier(vmx); 2315 2316 if (enable_vpid) { 2317 /* 2318 * There is no direct mapping between vpid02 and vpid12, the 2319 * vpid02 is per-vCPU for L0 and reused while the value of 2320 * vpid12 is changed w/ one invvpid during nested vmentry. 2321 * The vpid12 is allocated by L1 for L2, so it will not 2322 * influence global bitmap(for vpid01 and vpid02 allocation) 2323 * even if spawn a lot of nested vCPUs. 2324 */ 2325 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) { 2326 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { 2327 vmx->nested.last_vpid = vmcs12->virtual_processor_id; 2328 __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false); 2329 } 2330 } else { 2331 /* 2332 * If L1 use EPT, then L0 needs to execute INVEPT on 2333 * EPTP02 instead of EPTP01. 
			 * Therefore, delay the TLB flush until vmcs02->eptp is
			 * fully updated by KVM_REQ_LOAD_CR3. Note that this
			 * assumes KVM_REQ_TLB_FLUSH is evaluated after
			 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
			 */
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}
	}

	if (nested_cpu_has_ept(vmcs12))
		nested_ept_init_mmu_context(vcpu);
	else if (nested_cpu_has2(vmcs12,
				 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		vmx_flush_tlb(vcpu, true);

	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
	 * bits which we consider mandatory enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; it's not enough to take
	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may have
	 * more bits set than L1 expected.
	 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
	/* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/*
	 * If guest state is invalid and unrestricted guest is disabled,
	 * L1 attempted a VMEntry to L2 with invalid state.
	 * Fail the VMEntry.
	 */
	if (vmx->emulation_required) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return 1;
	}

	/* Load the guest's CR3, using either EPT or shadow page tables. */
	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
				entry_failure_code))
		return 1;

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;

	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
	return 0;
}

static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_nmi_exiting(vmcs12) &&
	    nested_cpu_has_virtual_nmis(vmcs12))
		return -EINVAL;

	if (!nested_cpu_has_virtual_nmis(vmcs12) &&
	    nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
		return -EINVAL;

	return 0;
}

static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int maxphyaddr = cpuid_maxphyaddr(vcpu);

	/* Check for memory type validity */
	switch (address & VMX_EPTP_MT_MASK) {
	case VMX_EPTP_MT_UC:
		if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
			return false;
		break;
	case VMX_EPTP_MT_WB:
		if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
			return false;
		break;
	default:
		return false;
	}

	/* Only a 4-level page-walk length is valid */
	if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
		return false;

	/* Reserved bits should not be set */
	if (address >> maxphyaddr || ((address >> 7) & 0x1f))
		return false;

	/* AD, if set, should be supported */
	if (address & VMX_EPTP_AD_ENABLE_BIT) {
		if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
			return false;
	}

	return true;
}

/*
 * Checks related to VM-Execution Control Fields
 */
static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx
= to_vmx(vcpu); 2446 2447 if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control, 2448 vmx->nested.msrs.pinbased_ctls_low, 2449 vmx->nested.msrs.pinbased_ctls_high) || 2450 !vmx_control_verify(vmcs12->cpu_based_vm_exec_control, 2451 vmx->nested.msrs.procbased_ctls_low, 2452 vmx->nested.msrs.procbased_ctls_high)) 2453 return -EINVAL; 2454 2455 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && 2456 !vmx_control_verify(vmcs12->secondary_vm_exec_control, 2457 vmx->nested.msrs.secondary_ctls_low, 2458 vmx->nested.msrs.secondary_ctls_high)) 2459 return -EINVAL; 2460 2461 if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) || 2462 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || 2463 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || 2464 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || 2465 nested_vmx_check_apic_access_controls(vcpu, vmcs12) || 2466 nested_vmx_check_apicv_controls(vcpu, vmcs12) || 2467 nested_vmx_check_nmi_controls(vmcs12) || 2468 nested_vmx_check_pml_controls(vcpu, vmcs12) || 2469 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || 2470 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || 2471 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || 2472 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2473 return -EINVAL; 2474 2475 if (nested_cpu_has_ept(vmcs12) && 2476 !valid_ept_address(vcpu, vmcs12->ept_pointer)) 2477 return -EINVAL; 2478 2479 if (nested_cpu_has_vmfunc(vmcs12)) { 2480 if (vmcs12->vm_function_control & 2481 ~vmx->nested.msrs.vmfunc_controls) 2482 return -EINVAL; 2483 2484 if (nested_cpu_has_eptp_switching(vmcs12)) { 2485 if (!nested_cpu_has_ept(vmcs12) || 2486 !page_address_valid(vcpu, vmcs12->eptp_list_address)) 2487 return -EINVAL; 2488 } 2489 } 2490 2491 return 0; 2492 } 2493 2494 /* 2495 * Checks related to VM-Exit Control Fields 2496 */ 2497 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, 2498 struct vmcs12 *vmcs12) 2499 { 2500 struct vcpu_vmx *vmx = to_vmx(vcpu); 2501 2502 if (!vmx_control_verify(vmcs12->vm_exit_controls, 2503 vmx->nested.msrs.exit_ctls_low, 2504 vmx->nested.msrs.exit_ctls_high) || 2505 nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)) 2506 return -EINVAL; 2507 2508 return 0; 2509 } 2510 2511 /* 2512 * Checks related to VM-Entry Control Fields 2513 */ 2514 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, 2515 struct vmcs12 *vmcs12) 2516 { 2517 struct vcpu_vmx *vmx = to_vmx(vcpu); 2518 2519 if (!vmx_control_verify(vmcs12->vm_entry_controls, 2520 vmx->nested.msrs.entry_ctls_low, 2521 vmx->nested.msrs.entry_ctls_high)) 2522 return -EINVAL; 2523 2524 /* 2525 * From the Intel SDM, volume 3: 2526 * Fields relevant to VM-entry event injection must be set properly. 2527 * These fields are the VM-entry interruption-information field, the 2528 * VM-entry exception error code, and the VM-entry instruction length. 
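	 *
	 * For example, injecting a hardware exception with vector 13 (#GP)
	 * into a protected-mode guest must set the deliver-error-code bit,
	 * while vector 6 (#UD) must leave it clear; the checks below enforce
	 * exactly this.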
2529 */ 2530 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { 2531 u32 intr_info = vmcs12->vm_entry_intr_info_field; 2532 u8 vector = intr_info & INTR_INFO_VECTOR_MASK; 2533 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; 2534 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; 2535 bool should_have_error_code; 2536 bool urg = nested_cpu_has2(vmcs12, 2537 SECONDARY_EXEC_UNRESTRICTED_GUEST); 2538 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; 2539 2540 /* VM-entry interruption-info field: interruption type */ 2541 if (intr_type == INTR_TYPE_RESERVED || 2542 (intr_type == INTR_TYPE_OTHER_EVENT && 2543 !nested_cpu_supports_monitor_trap_flag(vcpu))) 2544 return -EINVAL; 2545 2546 /* VM-entry interruption-info field: vector */ 2547 if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || 2548 (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || 2549 (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) 2550 return -EINVAL; 2551 2552 /* VM-entry interruption-info field: deliver error code */ 2553 should_have_error_code = 2554 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && 2555 x86_exception_has_error_code(vector); 2556 if (has_error_code != should_have_error_code) 2557 return -EINVAL; 2558 2559 /* VM-entry exception error code */ 2560 if (has_error_code && 2561 vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)) 2562 return -EINVAL; 2563 2564 /* VM-entry interruption-info field: reserved bits */ 2565 if (intr_info & INTR_INFO_RESVD_BITS_MASK) 2566 return -EINVAL; 2567 2568 /* VM-entry instruction length */ 2569 switch (intr_type) { 2570 case INTR_TYPE_SOFT_EXCEPTION: 2571 case INTR_TYPE_SOFT_INTR: 2572 case INTR_TYPE_PRIV_SW_EXCEPTION: 2573 if ((vmcs12->vm_entry_instruction_len > 15) || 2574 (vmcs12->vm_entry_instruction_len == 0 && 2575 !nested_cpu_has_zero_length_injection(vcpu))) 2576 return -EINVAL; 2577 } 2578 } 2579 2580 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) 2581 return -EINVAL; 2582 2583 return 0; 2584 } 2585 2586 /* 2587 * Checks related to Host Control Registers and MSRs 2588 */ 2589 static int nested_check_host_control_regs(struct kvm_vcpu *vcpu, 2590 struct vmcs12 *vmcs12) 2591 { 2592 bool ia32e; 2593 2594 if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) || 2595 !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) || 2596 !nested_cr3_valid(vcpu, vmcs12->host_cr3)) 2597 return -EINVAL; 2598 /* 2599 * If the load IA32_EFER VM-exit control is 1, bits reserved in the 2600 * IA32_EFER MSR must be 0 in the field for that register. In addition, 2601 * the values of the LMA and LME bits in the field must each be that of 2602 * the host address-space size VM-exit control. 
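	 *
	 * I.e. a 64-bit host (VM_EXIT_HOST_ADDR_SPACE_SIZE set) must provide
	 * a host_ia32_efer value with both LMA and LME set, while a 32-bit
	 * host must have both bits clear.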
2603 */ 2604 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { 2605 ia32e = (vmcs12->vm_exit_controls & 2606 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; 2607 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || 2608 ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || 2609 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) 2610 return -EINVAL; 2611 } 2612 2613 return 0; 2614 } 2615 2616 /* 2617 * Checks related to Guest Non-register State 2618 */ 2619 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) 2620 { 2621 if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && 2622 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) 2623 return -EINVAL; 2624 2625 return 0; 2626 } 2627 2628 static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu, 2629 struct vmcs12 *vmcs12) 2630 { 2631 if (nested_check_vm_execution_controls(vcpu, vmcs12) || 2632 nested_check_vm_exit_controls(vcpu, vmcs12) || 2633 nested_check_vm_entry_controls(vcpu, vmcs12)) 2634 return VMXERR_ENTRY_INVALID_CONTROL_FIELD; 2635 2636 if (nested_check_host_control_regs(vcpu, vmcs12)) 2637 return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; 2638 2639 if (nested_check_guest_non_reg_state(vmcs12)) 2640 return VMXERR_ENTRY_INVALID_CONTROL_FIELD; 2641 2642 return 0; 2643 } 2644 2645 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, 2646 struct vmcs12 *vmcs12) 2647 { 2648 int r; 2649 struct page *page; 2650 struct vmcs12 *shadow; 2651 2652 if (vmcs12->vmcs_link_pointer == -1ull) 2653 return 0; 2654 2655 if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)) 2656 return -EINVAL; 2657 2658 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer); 2659 if (is_error_page(page)) 2660 return -EINVAL; 2661 2662 r = 0; 2663 shadow = kmap(page); 2664 if (shadow->hdr.revision_id != VMCS12_REVISION || 2665 shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)) 2666 r = -EINVAL; 2667 kunmap(page); 2668 kvm_release_page_clean(page); 2669 return r; 2670 } 2671 2672 static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu, 2673 struct vmcs12 *vmcs12, 2674 u32 *exit_qual) 2675 { 2676 bool ia32e; 2677 2678 *exit_qual = ENTRY_FAIL_DEFAULT; 2679 2680 if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) || 2681 !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) 2682 return 1; 2683 2684 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { 2685 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR; 2686 return 1; 2687 } 2688 2689 /* 2690 * If the load IA32_EFER VM-entry control is 1, the following checks 2691 * are performed on the field for the IA32_EFER MSR: 2692 * - Bits reserved in the IA32_EFER MSR must be 0. 2693 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of 2694 * the IA-32e mode guest VM-exit control. It must also be identical 2695 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to 2696 * CR0.PG) is 1. 
2697 */ 2698 if (to_vmx(vcpu)->nested.nested_run_pending && 2699 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { 2700 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; 2701 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || 2702 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) || 2703 ((vmcs12->guest_cr0 & X86_CR0_PG) && 2704 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) 2705 return 1; 2706 } 2707 2708 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && 2709 (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) || 2710 (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))) 2711 return 1; 2712 2713 return 0; 2714 } 2715 2716 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) 2717 { 2718 struct vcpu_vmx *vmx = to_vmx(vcpu); 2719 unsigned long cr3, cr4; 2720 2721 if (!nested_early_check) 2722 return 0; 2723 2724 if (vmx->msr_autoload.host.nr) 2725 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 2726 if (vmx->msr_autoload.guest.nr) 2727 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 2728 2729 preempt_disable(); 2730 2731 vmx_prepare_switch_to_guest(vcpu); 2732 2733 /* 2734 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, 2735 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to 2736 * be written (by preparve_vmcs02()) before the "real" VMEnter, i.e. 2737 * there is no need to preserve other bits or save/restore the field. 2738 */ 2739 vmcs_writel(GUEST_RFLAGS, 0); 2740 2741 cr3 = __get_current_cr3_fast(); 2742 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 2743 vmcs_writel(HOST_CR3, cr3); 2744 vmx->loaded_vmcs->host_state.cr3 = cr3; 2745 } 2746 2747 cr4 = cr4_read_shadow(); 2748 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 2749 vmcs_writel(HOST_CR4, cr4); 2750 vmx->loaded_vmcs->host_state.cr4 = cr4; 2751 } 2752 2753 vmx->__launched = vmx->loaded_vmcs->launched; 2754 2755 asm( 2756 /* Set HOST_RSP */ 2757 "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */ 2758 __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t" 2759 "mov %%" _ASM_SP ", %c[host_rsp](%1)\n\t" 2760 "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */ 2761 2762 /* Check if vmlaunch or vmresume is needed */ 2763 "cmpl $0, %c[launched](%% " _ASM_CX")\n\t" 2764 2765 "call vmx_vmenter\n\t" 2766 2767 /* Set vmx->fail accordingly */ 2768 "setbe %c[fail](%% " _ASM_CX")\n\t" 2769 : ASM_CALL_CONSTRAINT 2770 : "c"(vmx), "d"((unsigned long)HOST_RSP), 2771 [launched]"i"(offsetof(struct vcpu_vmx, __launched)), 2772 [fail]"i"(offsetof(struct vcpu_vmx, fail)), 2773 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), 2774 [wordsize]"i"(sizeof(ulong)) 2775 : "rax", "cc", "memory" 2776 ); 2777 2778 preempt_enable(); 2779 2780 if (vmx->msr_autoload.host.nr) 2781 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 2782 if (vmx->msr_autoload.guest.nr) 2783 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 2784 2785 if (vmx->fail) { 2786 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 2787 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 2788 vmx->fail = 0; 2789 return 1; 2790 } 2791 2792 /* 2793 * VMExit clears RFLAGS.IF and DR7, even on a consistency check. 2794 */ 2795 local_irq_enable(); 2796 if (hw_breakpoint_active()) 2797 set_debugreg(__this_cpu_read(cpu_dr7), 7); 2798 2799 /* 2800 * A non-failing VMEntry means we somehow entered guest mode with 2801 * an illegal RIP, and that's just the tip of the iceberg. 
There 2802 * is no telling what memory has been modified or what state has 2803 * been exposed to unknown code. Hitting this all but guarantees 2804 * a (very critical) hardware issue. 2805 */ 2806 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & 2807 VMX_EXIT_REASONS_FAILED_VMENTRY)); 2808 2809 return 0; 2810 } 2811 STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw); 2812 2813 2814 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, 2815 struct vmcs12 *vmcs12); 2816 2817 static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) 2818 { 2819 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2820 struct vcpu_vmx *vmx = to_vmx(vcpu); 2821 struct page *page; 2822 u64 hpa; 2823 2824 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 2825 /* 2826 * Translate L1 physical address to host physical 2827 * address for vmcs02. Keep the page pinned, so this 2828 * physical address remains valid. We keep a reference 2829 * to it so we can release it later. 2830 */ 2831 if (vmx->nested.apic_access_page) { /* shouldn't happen */ 2832 kvm_release_page_dirty(vmx->nested.apic_access_page); 2833 vmx->nested.apic_access_page = NULL; 2834 } 2835 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); 2836 /* 2837 * If translation failed, no matter: This feature asks 2838 * to exit when accessing the given address, and if it 2839 * can never be accessed, this feature won't do 2840 * anything anyway. 2841 */ 2842 if (!is_error_page(page)) { 2843 vmx->nested.apic_access_page = page; 2844 hpa = page_to_phys(vmx->nested.apic_access_page); 2845 vmcs_write64(APIC_ACCESS_ADDR, hpa); 2846 } else { 2847 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, 2848 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); 2849 } 2850 } 2851 2852 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 2853 if (vmx->nested.virtual_apic_page) { /* shouldn't happen */ 2854 kvm_release_page_dirty(vmx->nested.virtual_apic_page); 2855 vmx->nested.virtual_apic_page = NULL; 2856 } 2857 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr); 2858 2859 /* 2860 * If translation failed, VM entry will fail because 2861 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull. 2862 * Failing the vm entry is _not_ what the processor 2863 * does but it's basically the only possibility we 2864 * have. We could still enter the guest if CR8 load 2865 * exits are enabled, CR8 store exits are enabled, and 2866 * virtualize APIC access is disabled; in this case 2867 * the processor would never use the TPR shadow and we 2868 * could simply clear the bit from the execution 2869 * control. But such a configuration is useless, so 2870 * let's keep the code simple. 
		 */
		if (!is_error_page(page)) {
			vmx->nested.virtual_apic_page = page;
			hpa = page_to_phys(vmx->nested.virtual_apic_page);
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
		}
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		if (vmx->nested.pi_desc_page) { /* shouldn't happen */
			kunmap(vmx->nested.pi_desc_page);
			kvm_release_page_dirty(vmx->nested.pi_desc_page);
			vmx->nested.pi_desc_page = NULL;
			vmx->nested.pi_desc = NULL;
			vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
		}
		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
		if (is_error_page(page))
			return;
		vmx->nested.pi_desc_page = page;
		vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
		vmx->nested.pi_desc =
			(struct pi_desc *)((void *)vmx->nested.pi_desc +
			(unsigned long)(vmcs12->posted_intr_desc_addr &
			(PAGE_SIZE - 1)));
		vmcs_write64(POSTED_INTR_DESC_ADDR,
			     page_to_phys(vmx->nested.pi_desc_page) +
			     (unsigned long)(vmcs12->posted_intr_desc_addr &
			     (PAGE_SIZE - 1)));
	}
	if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
		vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
			      CPU_BASED_USE_MSR_BITMAPS);
	else
		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
				CPU_BASED_USE_MSR_BITMAPS);
}

/*
 * Intel's VMX Instruction Reference specifies a common set of prerequisites
 * for running VMX instructions (except VMXON, whose prerequisites are
 * slightly different). It also specifies what exception to inject otherwise.
 * Note that many of these exceptions have priority over VM exits, so they
 * don't have to be checked again here.
 */
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
	if (!to_vmx(vcpu)->nested.vmxon) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 0;
	}

	return 1;
}

static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
{
	u8 rvi = vmx_get_rvi();
	u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);

	return ((rvi & 0xf0) > (vppr & 0xf0));
}

static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12);

/*
 * If from_vmentry is false, this is being called from state restore (either RSM
 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
 *
 * Returns:
 *   0 - success, i.e. proceed with actual VMEnter
 *   1 - consistency check VMExit
 *  -1 - consistency check VMFail
 */
int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	bool evaluate_pending_interrupts;
	u32 exit_reason = EXIT_REASON_INVALID_STATE;
	u32 exit_qual;

	evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
		(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);

	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	if (kvm_mpx_supported() &&
	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
		vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);

	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);

	prepare_vmcs02_early(vmx, vmcs12);

	if (from_vmentry) {
		nested_get_vmcs12_pages(vcpu);

		if (nested_vmx_check_vmentry_hw(vcpu)) {
			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
			return -1;
		}

		if (nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
			goto vmentry_fail_vmexit;
	}

	enter_guest_mode(vcpu);
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
		vcpu->arch.tsc_offset += vmcs12->tsc_offset;

	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
		goto vmentry_fail_vmexit_guest_mode;

	if (from_vmentry) {
		exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
		exit_qual = nested_vmx_load_msr(vcpu,
						vmcs12->vm_entry_msr_load_addr,
						vmcs12->vm_entry_msr_load_count);
		if (exit_qual)
			goto vmentry_fail_vmexit_guest_mode;
	} else {
		/*
		 * The MMU is not initialized to point at the right entities yet and
		 * "get pages" would need to read data from the guest (i.e. we will
		 * need to perform gpa to hpa translation). Request a call
		 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
		 * have already been set at vmentry time and should not be reset.
		 */
		kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
	}

	/*
	 * If L1 had a pending IRQ/NMI when it executed VMLAUNCH/VMRESUME
	 * that wasn't delivered because it was disallowed (e.g. interrupts
	 * disabled), L0 needs to evaluate whether this pending event should
	 * cause an exit from L2 to L1 or be delivered directly to L2 (e.g.
	 * in case L1 doesn't intercept EXTERNAL_INTERRUPT).
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request, or checking RVI during evaluation of
	 * pending virtual interrupts. However, this setting was done
	 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
	 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
	 */
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, vcpu);

	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
	 * returned as far as L1 is concerned. It will only return (and set
	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3033 */ 3034 return 0; 3035 3036 /* 3037 * A failed consistency check that leads to a VMExit during L1's 3038 * VMEnter to L2 is a variation of a normal VMexit, as explained in 3039 * 26.7 "VM-entry failures during or after loading guest state". 3040 */ 3041 vmentry_fail_vmexit_guest_mode: 3042 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 3043 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3044 leave_guest_mode(vcpu); 3045 3046 vmentry_fail_vmexit: 3047 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3048 3049 if (!from_vmentry) 3050 return 1; 3051 3052 load_vmcs12_host_state(vcpu, vmcs12); 3053 vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; 3054 vmcs12->exit_qualification = exit_qual; 3055 if (enable_shadow_vmcs || vmx->nested.hv_evmcs) 3056 vmx->nested.need_vmcs12_sync = true; 3057 return 1; 3058 } 3059 3060 /* 3061 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 3062 * for running an L2 nested guest. 3063 */ 3064 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) 3065 { 3066 struct vmcs12 *vmcs12; 3067 struct vcpu_vmx *vmx = to_vmx(vcpu); 3068 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); 3069 int ret; 3070 3071 if (!nested_vmx_check_permission(vcpu)) 3072 return 1; 3073 3074 if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true)) 3075 return 1; 3076 3077 if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull) 3078 return nested_vmx_failInvalid(vcpu); 3079 3080 vmcs12 = get_vmcs12(vcpu); 3081 3082 /* 3083 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact 3084 * that there *is* a valid VMCS pointer, RFLAGS.CF is set 3085 * rather than RFLAGS.ZF, and no error number is stored to the 3086 * VM-instruction error field. 3087 */ 3088 if (vmcs12->hdr.shadow_vmcs) 3089 return nested_vmx_failInvalid(vcpu); 3090 3091 if (vmx->nested.hv_evmcs) { 3092 copy_enlightened_to_vmcs12(vmx); 3093 /* Enlightened VMCS doesn't have launch state */ 3094 vmcs12->launch_state = !launch; 3095 } else if (enable_shadow_vmcs) { 3096 copy_shadow_to_vmcs12(vmx); 3097 } 3098 3099 /* 3100 * The nested entry process starts with enforcing various prerequisites 3101 * on vmcs12 as required by the Intel SDM, and act appropriately when 3102 * they fail: As the SDM explains, some conditions should cause the 3103 * instruction to fail, while others will cause the instruction to seem 3104 * to succeed, but return an EXIT_REASON_INVALID_STATE. 3105 * To speed up the normal (success) code path, we should avoid checking 3106 * for misconfigurations which will anyway be caught by the processor 3107 * when using the merged vmcs02. 3108 */ 3109 if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) 3110 return nested_vmx_failValid(vcpu, 3111 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); 3112 3113 if (vmcs12->launch_state == launch) 3114 return nested_vmx_failValid(vcpu, 3115 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS 3116 : VMXERR_VMRESUME_NONLAUNCHED_VMCS); 3117 3118 ret = nested_vmx_check_vmentry_prereqs(vcpu, vmcs12); 3119 if (ret) 3120 return nested_vmx_failValid(vcpu, ret); 3121 3122 /* 3123 * We're finally done with prerequisite checking, and can start with 3124 * the nested entry. 3125 */ 3126 vmx->nested.nested_run_pending = 1; 3127 ret = nested_vmx_enter_non_root_mode(vcpu, true); 3128 vmx->nested.nested_run_pending = !ret; 3129 if (ret > 0) 3130 return 1; 3131 else if (ret) 3132 return nested_vmx_failValid(vcpu, 3133 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3134 3135 /* Hide L1D cache contents from the nested guest. 
*/ 3136 vmx->vcpu.arch.l1tf_flush_l1d = true; 3137 3138 /* 3139 * Must happen outside of nested_vmx_enter_non_root_mode() as it will 3140 * also be used as part of restoring nVMX state for 3141 * snapshot restore (migration). 3142 * 3143 * In this flow, it is assumed that vmcs12 cache was 3144 * trasferred as part of captured nVMX state and should 3145 * therefore not be read from guest memory (which may not 3146 * exist on destination host yet). 3147 */ 3148 nested_cache_shadow_vmcs12(vcpu, vmcs12); 3149 3150 /* 3151 * If we're entering a halted L2 vcpu and the L2 vcpu won't be 3152 * awakened by event injection or by an NMI-window VM-exit or 3153 * by an interrupt-window VM-exit, halt the vcpu. 3154 */ 3155 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && 3156 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && 3157 !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) && 3158 !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) && 3159 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { 3160 vmx->nested.nested_run_pending = 0; 3161 return kvm_vcpu_halt(vcpu); 3162 } 3163 return 1; 3164 } 3165 3166 /* 3167 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date 3168 * because L2 may have changed some cr0 bits directly (CRO_GUEST_HOST_MASK). 3169 * This function returns the new value we should put in vmcs12.guest_cr0. 3170 * It's not enough to just return the vmcs02 GUEST_CR0. Rather, 3171 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now 3172 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 3173 * didn't trap the bit, because if L1 did, so would L0). 3174 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have 3175 * been modified by L2, and L1 knows it. So just leave the old value of 3176 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 3177 * isn't relevant, because if L0 traps this bit it can set it to anything. 3178 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have 3179 * changed these bits, and therefore they need to be updated, but L0 3180 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather 3181 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. 
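 *
 * Worked example (hypothetical masks): suppose L0 traps CR0.TS but not
 * CR0.MP, and L1 traps only CR0.MP, so vmcs12->cr0_guest_host_mask = MP
 * and cr0_guest_owned_bits contains neither TS nor MP.  A bit like NE
 * (case 1) is then read from vmcs02 GUEST_CR0, MP (case 2) keeps the value
 * already in vmcs12->guest_cr0, and TS (case 3) is taken from vmcs02
 * CR0_READ_SHADOW.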
3182 */ 3183 static inline unsigned long 3184 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3185 { 3186 return 3187 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | 3188 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | 3189 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | 3190 vcpu->arch.cr0_guest_owned_bits)); 3191 } 3192 3193 static inline unsigned long 3194 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3195 { 3196 return 3197 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | 3198 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | 3199 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | 3200 vcpu->arch.cr4_guest_owned_bits)); 3201 } 3202 3203 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, 3204 struct vmcs12 *vmcs12) 3205 { 3206 u32 idt_vectoring; 3207 unsigned int nr; 3208 3209 if (vcpu->arch.exception.injected) { 3210 nr = vcpu->arch.exception.nr; 3211 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3212 3213 if (kvm_exception_is_soft(nr)) { 3214 vmcs12->vm_exit_instruction_len = 3215 vcpu->arch.event_exit_inst_len; 3216 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; 3217 } else 3218 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; 3219 3220 if (vcpu->arch.exception.has_error_code) { 3221 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; 3222 vmcs12->idt_vectoring_error_code = 3223 vcpu->arch.exception.error_code; 3224 } 3225 3226 vmcs12->idt_vectoring_info_field = idt_vectoring; 3227 } else if (vcpu->arch.nmi_injected) { 3228 vmcs12->idt_vectoring_info_field = 3229 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; 3230 } else if (vcpu->arch.interrupt.injected) { 3231 nr = vcpu->arch.interrupt.nr; 3232 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3233 3234 if (vcpu->arch.interrupt.soft) { 3235 idt_vectoring |= INTR_TYPE_SOFT_INTR; 3236 vmcs12->vm_entry_instruction_len = 3237 vcpu->arch.event_exit_inst_len; 3238 } else 3239 idt_vectoring |= INTR_TYPE_EXT_INTR; 3240 3241 vmcs12->idt_vectoring_info_field = idt_vectoring; 3242 } 3243 } 3244 3245 3246 static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) 3247 { 3248 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3249 gfn_t gfn; 3250 3251 /* 3252 * Don't need to mark the APIC access page dirty; it is never 3253 * written to by the CPU during APIC virtualization. 
3254 */ 3255 3256 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3257 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; 3258 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3259 } 3260 3261 if (nested_cpu_has_posted_intr(vmcs12)) { 3262 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; 3263 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3264 } 3265 } 3266 3267 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 3268 { 3269 struct vcpu_vmx *vmx = to_vmx(vcpu); 3270 int max_irr; 3271 void *vapic_page; 3272 u16 status; 3273 3274 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) 3275 return; 3276 3277 vmx->nested.pi_pending = false; 3278 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 3279 return; 3280 3281 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); 3282 if (max_irr != 256) { 3283 vapic_page = kmap(vmx->nested.virtual_apic_page); 3284 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, 3285 vapic_page, &max_irr); 3286 kunmap(vmx->nested.virtual_apic_page); 3287 3288 status = vmcs_read16(GUEST_INTR_STATUS); 3289 if ((u8)max_irr > ((u8)status & 0xff)) { 3290 status &= ~0xff; 3291 status |= (u8)max_irr; 3292 vmcs_write16(GUEST_INTR_STATUS, status); 3293 } 3294 } 3295 3296 nested_mark_vmcs12_pages_dirty(vcpu); 3297 } 3298 3299 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, 3300 unsigned long exit_qual) 3301 { 3302 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3303 unsigned int nr = vcpu->arch.exception.nr; 3304 u32 intr_info = nr | INTR_INFO_VALID_MASK; 3305 3306 if (vcpu->arch.exception.has_error_code) { 3307 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; 3308 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 3309 } 3310 3311 if (kvm_exception_is_soft(nr)) 3312 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 3313 else 3314 intr_info |= INTR_TYPE_HARD_EXCEPTION; 3315 3316 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && 3317 vmx_get_nmi_mask(vcpu)) 3318 intr_info |= INTR_INFO_UNBLOCK_NMI; 3319 3320 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); 3321 } 3322 3323 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) 3324 { 3325 struct vcpu_vmx *vmx = to_vmx(vcpu); 3326 unsigned long exit_qual; 3327 bool block_nested_events = 3328 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); 3329 3330 if (vcpu->arch.exception.pending && 3331 nested_vmx_check_exception(vcpu, &exit_qual)) { 3332 if (block_nested_events) 3333 return -EBUSY; 3334 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3335 return 0; 3336 } 3337 3338 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && 3339 vmx->nested.preemption_timer_expired) { 3340 if (block_nested_events) 3341 return -EBUSY; 3342 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); 3343 return 0; 3344 } 3345 3346 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { 3347 if (block_nested_events) 3348 return -EBUSY; 3349 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 3350 NMI_VECTOR | INTR_TYPE_NMI_INTR | 3351 INTR_INFO_VALID_MASK, 0); 3352 /* 3353 * The NMI-triggered VM exit counts as injection: 3354 * clear this one and block further NMIs. 
3355 */ 3356 vcpu->arch.nmi_pending = 0; 3357 vmx_set_nmi_mask(vcpu, true); 3358 return 0; 3359 } 3360 3361 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && 3362 nested_exit_on_intr(vcpu)) { 3363 if (block_nested_events) 3364 return -EBUSY; 3365 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); 3366 return 0; 3367 } 3368 3369 vmx_complete_nested_posted_interrupt(vcpu); 3370 return 0; 3371 } 3372 3373 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 3374 { 3375 ktime_t remaining = 3376 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); 3377 u64 value; 3378 3379 if (ktime_to_ns(remaining) <= 0) 3380 return 0; 3381 3382 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; 3383 do_div(value, 1000000); 3384 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 3385 } 3386 3387 /* 3388 * Update the guest state fields of vmcs12 to reflect changes that 3389 * occurred while L2 was running. (The "IA-32e mode guest" bit of the 3390 * VM-entry controls is also updated, since this is really a guest 3391 * state bit.) 3392 */ 3393 static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3394 { 3395 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 3396 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); 3397 3398 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); 3399 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); 3400 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); 3401 3402 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); 3403 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); 3404 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); 3405 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); 3406 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); 3407 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); 3408 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); 3409 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); 3410 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); 3411 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); 3412 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); 3413 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); 3414 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); 3415 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); 3416 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); 3417 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); 3418 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); 3419 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); 3420 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); 3421 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); 3422 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); 3423 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); 3424 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); 3425 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); 3426 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); 3427 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); 3428 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); 3429 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); 3430 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); 3431 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); 3432 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); 3433 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); 3434 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); 3435 vmcs12->guest_tr_base = 
vmcs_readl(GUEST_TR_BASE); 3436 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); 3437 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); 3438 3439 vmcs12->guest_interruptibility_info = 3440 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 3441 vmcs12->guest_pending_dbg_exceptions = 3442 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); 3443 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 3444 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; 3445 else 3446 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; 3447 3448 if (nested_cpu_has_preemption_timer(vmcs12)) { 3449 if (vmcs12->vm_exit_controls & 3450 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) 3451 vmcs12->vmx_preemption_timer_value = 3452 vmx_get_preemption_timer_value(vcpu); 3453 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); 3454 } 3455 3456 /* 3457 * In some cases (usually, nested EPT), L2 is allowed to change its 3458 * own CR3 without exiting. If it has changed it, we must keep it. 3459 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined 3460 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. 3461 * 3462 * Additionally, restore L2's PDPTR to vmcs12. 3463 */ 3464 if (enable_ept) { 3465 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); 3466 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); 3467 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); 3468 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); 3469 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); 3470 } 3471 3472 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); 3473 3474 if (nested_cpu_has_vid(vmcs12)) 3475 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); 3476 3477 vmcs12->vm_entry_controls = 3478 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | 3479 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); 3480 3481 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) { 3482 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); 3483 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 3484 } 3485 3486 /* TODO: These cannot have changed unless we have MSR bitmaps and 3487 * the relevant bit asks not to trap the change */ 3488 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) 3489 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); 3490 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) 3491 vmcs12->guest_ia32_efer = vcpu->arch.efer; 3492 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); 3493 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); 3494 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); 3495 if (kvm_mpx_supported()) 3496 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3497 } 3498 3499 /* 3500 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits 3501 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), 3502 * and this function updates it to reflect the changes to the guest state while 3503 * L2 was running (and perhaps made some exits which were handled directly by L0 3504 * without going back to L1), and to reflect the exit reason. 3505 * Note that we do not have to copy here all VMCS fields, just those that 3506 * could have changed by the L2 guest or the exit - i.e., the guest-state and 3507 * exit-information fields only. Other fields are modified by L1 with VMWRITE, 3508 * which already writes to vmcs12 directly. 
3509 */ 3510 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 3511 u32 exit_reason, u32 exit_intr_info, 3512 unsigned long exit_qualification) 3513 { 3514 /* update guest state fields: */ 3515 sync_vmcs12(vcpu, vmcs12); 3516 3517 /* update exit information fields: */ 3518 3519 vmcs12->vm_exit_reason = exit_reason; 3520 vmcs12->exit_qualification = exit_qualification; 3521 vmcs12->vm_exit_intr_info = exit_intr_info; 3522 3523 vmcs12->idt_vectoring_info_field = 0; 3524 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 3525 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 3526 3527 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { 3528 vmcs12->launch_state = 1; 3529 3530 /* vm_entry_intr_info_field is cleared on exit. Emulate this 3531 * instead of reading the real value. */ 3532 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; 3533 3534 /* 3535 * Transfer the event that L0 or L1 may have wanted to inject into 3536 * L2 to IDT_VECTORING_INFO_FIELD. 3537 */ 3538 vmcs12_save_pending_event(vcpu, vmcs12); 3539 3540 /* 3541 * According to spec, there's no need to store the guest's 3542 * MSRs if the exit is due to a VM-entry failure that occurs 3543 * during or after loading the guest state. Since this exit 3544 * does not fall in that category, we need to save the MSRs. 3545 */ 3546 if (nested_vmx_store_msr(vcpu, 3547 vmcs12->vm_exit_msr_store_addr, 3548 vmcs12->vm_exit_msr_store_count)) 3549 nested_vmx_abort(vcpu, 3550 VMX_ABORT_SAVE_GUEST_MSR_FAIL); 3551 } 3552 3553 /* 3554 * Drop what we picked up for L2 via vmx_complete_interrupts. It is 3555 * preserved above and would only end up incorrectly in L1. 3556 */ 3557 vcpu->arch.nmi_injected = false; 3558 kvm_clear_exception_queue(vcpu); 3559 kvm_clear_interrupt_queue(vcpu); 3560 } 3561 3562 /* 3563 * A part of what we need to do when the nested L2 guest exits and we want to 3564 * run its L1 parent is to reset L1's guest state to the host state specified 3565 * in vmcs12. 3566 * This function is to be called not only on normal nested exit, but also on 3567 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry 3568 * Failures During or After Loading Guest State"). 3569 * This function should be called when the active VMCS is L1's (vmcs01). 3570 */ 3571 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 3572 struct vmcs12 *vmcs12) 3573 { 3574 struct kvm_segment seg; 3575 u32 entry_failure_code; 3576 3577 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) 3578 vcpu->arch.efer = vmcs12->host_ia32_efer; 3579 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 3580 vcpu->arch.efer |= (EFER_LMA | EFER_LME); 3581 else 3582 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); 3583 vmx_set_efer(vcpu, vcpu->arch.efer); 3584 3585 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); 3586 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); 3587 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); 3588 vmx_set_interrupt_shadow(vcpu, 0); 3589 3590 /* 3591 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't 3592 * actually changed, because vmx_set_cr0 refers to efer set above. 3593 * 3594 * CR0_GUEST_HOST_MASK is already set in the original vmcs01 3595 * (KVM doesn't change it); 3596 */ 3597 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; 3598 vmx_set_cr0(vcpu, vmcs12->host_cr0); 3599 3600 /* Same as above - no reason to call set_cr4_guest_host_mask().
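 * cr4_guest_owned_bits is simply recomputed from the CR4 guest/host
 * mask that vmcs01 already contains.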
*/ 3601 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 3602 vmx_set_cr4(vcpu, vmcs12->host_cr4); 3603 3604 nested_ept_uninit_mmu_context(vcpu); 3605 3606 /* 3607 * Only PDPTE load can fail as the value of cr3 was checked on entry and 3608 * couldn't have changed. 3609 */ 3610 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) 3611 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); 3612 3613 if (!enable_ept) 3614 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; 3615 3616 /* 3617 * If vmcs01 doesn't use VPID, CPU flushes TLB on every 3618 * VMEntry/VMExit. Thus, no need to flush TLB. 3619 * 3620 * If vmcs12 doesn't use VPID, L1 expects TLB to be 3621 * flushed on every VMEntry/VMExit. 3622 * 3623 * Otherwise, we can preserve TLB entries as long as we are 3624 * able to tag L1 TLB entries differently than L2 TLB entries. 3625 * 3626 * If vmcs12 uses EPT, we need to execute this flush on EPTP01 3627 * and therefore we request the TLB flush to happen only after VMCS EPTP 3628 * has been set by KVM_REQ_LOAD_CR3. 3629 */ 3630 if (enable_vpid && 3631 (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) { 3632 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 3633 } 3634 3635 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); 3636 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); 3637 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); 3638 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); 3639 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); 3640 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); 3641 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); 3642 3643 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ 3644 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) 3645 vmcs_write64(GUEST_BNDCFGS, 0); 3646 3647 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { 3648 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); 3649 vcpu->arch.pat = vmcs12->host_ia32_pat; 3650 } 3651 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 3652 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, 3653 vmcs12->host_ia32_perf_global_ctrl); 3654 3655 /* Set L1 segment info according to Intel SDM 3656 27.5.2 Loading Host Segment and Descriptor-Table Registers */ 3657 seg = (struct kvm_segment) { 3658 .base = 0, 3659 .limit = 0xFFFFFFFF, 3660 .selector = vmcs12->host_cs_selector, 3661 .type = 11, 3662 .present = 1, 3663 .s = 1, 3664 .g = 1 3665 }; 3666 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 3667 seg.l = 1; 3668 else 3669 seg.db = 1; 3670 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); 3671 seg = (struct kvm_segment) { 3672 .base = 0, 3673 .limit = 0xFFFFFFFF, 3674 .type = 3, 3675 .present = 1, 3676 .s = 1, 3677 .db = 1, 3678 .g = 1 3679 }; 3680 seg.selector = vmcs12->host_ds_selector; 3681 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); 3682 seg.selector = vmcs12->host_es_selector; 3683 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); 3684 seg.selector = vmcs12->host_ss_selector; 3685 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); 3686 seg.selector = vmcs12->host_fs_selector; 3687 seg.base = vmcs12->host_fs_base; 3688 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); 3689 seg.selector = vmcs12->host_gs_selector; 3690 seg.base = vmcs12->host_gs_base; 3691 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); 3692 seg = (struct kvm_segment) { 3693 .base = vmcs12->host_tr_base, 3694 .limit = 0x67, 3695 .selector = vmcs12->host_tr_selector, 3696 .type = 11, 3697 .present = 1 3698 }; 3699 
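/*
 * Host TR is loaded as a busy TSS (type 11) with the architectural
 * 0x67 limit and a base taken from vmcs12, matching the initializer
 * above.
 */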
vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); 3700 3701 kvm_set_dr(vcpu, 7, 0x400); 3702 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 3703 3704 if (cpu_has_vmx_msr_bitmap()) 3705 vmx_update_msr_bitmap(vcpu); 3706 3707 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 3708 vmcs12->vm_exit_msr_load_count)) 3709 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 3710 } 3711 3712 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) 3713 { 3714 struct shared_msr_entry *efer_msr; 3715 unsigned int i; 3716 3717 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) 3718 return vmcs_read64(GUEST_IA32_EFER); 3719 3720 if (cpu_has_load_ia32_efer()) 3721 return host_efer; 3722 3723 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { 3724 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) 3725 return vmx->msr_autoload.guest.val[i].value; 3726 } 3727 3728 efer_msr = find_msr_entry(vmx, MSR_EFER); 3729 if (efer_msr) 3730 return efer_msr->data; 3731 3732 return host_efer; 3733 } 3734 3735 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) 3736 { 3737 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3738 struct vcpu_vmx *vmx = to_vmx(vcpu); 3739 struct vmx_msr_entry g, h; 3740 struct msr_data msr; 3741 gpa_t gpa; 3742 u32 i, j; 3743 3744 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); 3745 3746 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 3747 /* 3748 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set 3749 * as vmcs01.GUEST_DR7 contains a userspace defined value 3750 * and vcpu->arch.dr7 is not squirreled away before the 3751 * nested VMENTER (not worth adding a variable in nested_vmx). 3752 */ 3753 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 3754 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 3755 else 3756 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); 3757 } 3758 3759 /* 3760 * Note that calling vmx_set_{efer,cr0,cr4} is important as they 3761 * handle a variety of side effects to KVM's software model. 3762 */ 3763 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); 3764 3765 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; 3766 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); 3767 3768 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 3769 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); 3770 3771 nested_ept_uninit_mmu_context(vcpu); 3772 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 3773 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); 3774 3775 /* 3776 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs 3777 * from vmcs01 (if necessary). The PDPTRs are not loaded on 3778 * VMFail, like everything else we just need to ensure our 3779 * software model is up-to-date. 3780 */ 3781 ept_save_pdptrs(vcpu); 3782 3783 kvm_mmu_reset_context(vcpu); 3784 3785 if (cpu_has_vmx_msr_bitmap()) 3786 vmx_update_msr_bitmap(vcpu); 3787 3788 /* 3789 * This nasty bit of open coding is a compromise between blindly 3790 * loading L1's MSRs using the exit load lists (incorrect emulation 3791 * of VMFail), leaving the nested VM's MSRs in the software model 3792 * (incorrect behavior) and snapshotting the modified MSRs (too 3793 * expensive since the lists are unbound by hardware). For each 3794 * MSR that was (prematurely) loaded from the nested VMEntry load 3795 * list, reload it from the exit load list if it exists and differs 3796 * from the guest value. The intent is to stuff host state as 3797 * silently as possible, not to fully process the exit load list. 
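 * In practice this is a nested walk of the two MSR lists; this path
 * only runs when a hardware VMFail has to be unwound, so simplicity
 * wins over speed.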
3798 */ 3799 msr.host_initiated = false; 3800 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { 3801 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); 3802 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { 3803 pr_debug_ratelimited( 3804 "%s read MSR index failed (%u, 0x%08llx)\n", 3805 __func__, i, gpa); 3806 goto vmabort; 3807 } 3808 3809 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { 3810 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); 3811 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { 3812 pr_debug_ratelimited( 3813 "%s read MSR failed (%u, 0x%08llx)\n", 3814 __func__, j, gpa); 3815 goto vmabort; 3816 } 3817 if (h.index != g.index) 3818 continue; 3819 if (h.value == g.value) 3820 break; 3821 3822 if (nested_vmx_load_msr_check(vcpu, &h)) { 3823 pr_debug_ratelimited( 3824 "%s check failed (%u, 0x%x, 0x%x)\n", 3825 __func__, j, h.index, h.reserved); 3826 goto vmabort; 3827 } 3828 3829 msr.index = h.index; 3830 msr.data = h.value; 3831 if (kvm_set_msr(vcpu, &msr)) { 3832 pr_debug_ratelimited( 3833 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", 3834 __func__, j, h.index, h.value); 3835 goto vmabort; 3836 } 3837 } 3838 } 3839 3840 return; 3841 3842 vmabort: 3843 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 3844 } 3845 3846 /* 3847 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 3848 * and modify vmcs12 to make it see what it would expect to see there if 3849 * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) 3850 */ 3851 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, 3852 u32 exit_intr_info, unsigned long exit_qualification) 3853 { 3854 struct vcpu_vmx *vmx = to_vmx(vcpu); 3855 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3856 3857 /* trying to cancel vmlaunch/vmresume is a bug */ 3858 WARN_ON_ONCE(vmx->nested.nested_run_pending); 3859 3860 leave_guest_mode(vcpu); 3861 3862 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 3863 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3864 3865 if (likely(!vmx->fail)) { 3866 if (exit_reason == -1) 3867 sync_vmcs12(vcpu, vmcs12); 3868 else 3869 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, 3870 exit_qualification); 3871 3872 /* 3873 * Must happen outside of sync_vmcs12() as it will 3874 * also be used to capture vmcs12 cache as part of 3875 * capturing nVMX state for snapshot (migration). 3876 * 3877 * Otherwise, this flush will dirty guest memory at a 3878 * point it is already assumed by user-space to be 3879 * immutable. 3880 */ 3881 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); 3882 } else { 3883 /* 3884 * The only expected VM-instruction error is "VM entry with 3885 * invalid control field(s)." Anything else indicates a 3886 * problem with L0. And we should never get here with a 3887 * VMFail of any type if early consistency checks are enabled. 
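 * (When nested_early_check is enabled, such failures should have been
 * caught before the real VM-entry; hence the WARNs below.)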
3888 */ 3889 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 3890 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3891 WARN_ON_ONCE(nested_early_check); 3892 } 3893 3894 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3895 3896 /* Update any VMCS fields that might have changed while L2 ran */ 3897 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 3898 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 3899 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 3900 3901 if (kvm_has_tsc_control) 3902 decache_tsc_multiplier(vmx); 3903 3904 if (vmx->nested.change_vmcs01_virtual_apic_mode) { 3905 vmx->nested.change_vmcs01_virtual_apic_mode = false; 3906 vmx_set_virtual_apic_mode(vcpu); 3907 } else if (!nested_cpu_has_ept(vmcs12) && 3908 nested_cpu_has2(vmcs12, 3909 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3910 vmx_flush_tlb(vcpu, true); 3911 } 3912 3913 /* This is needed for same reason as it was needed in prepare_vmcs02 */ 3914 vmx->host_rsp = 0; 3915 3916 /* Unpin physical memory we referred to in vmcs02 */ 3917 if (vmx->nested.apic_access_page) { 3918 kvm_release_page_dirty(vmx->nested.apic_access_page); 3919 vmx->nested.apic_access_page = NULL; 3920 } 3921 if (vmx->nested.virtual_apic_page) { 3922 kvm_release_page_dirty(vmx->nested.virtual_apic_page); 3923 vmx->nested.virtual_apic_page = NULL; 3924 } 3925 if (vmx->nested.pi_desc_page) { 3926 kunmap(vmx->nested.pi_desc_page); 3927 kvm_release_page_dirty(vmx->nested.pi_desc_page); 3928 vmx->nested.pi_desc_page = NULL; 3929 vmx->nested.pi_desc = NULL; 3930 } 3931 3932 /* 3933 * We are now running in L2, mmu_notifier will force to reload the 3934 * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1. 3935 */ 3936 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 3937 3938 if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs)) 3939 vmx->nested.need_vmcs12_sync = true; 3940 3941 /* in case we halted in L2 */ 3942 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 3943 3944 if (likely(!vmx->fail)) { 3945 /* 3946 * TODO: SDM says that with acknowledge interrupt on 3947 * exit, bit 31 of the VM-exit interrupt information 3948 * (valid interrupt) is always set to 1 on 3949 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't 3950 * need kvm_cpu_has_interrupt(). See the commit 3951 * message for details. 3952 */ 3953 if (nested_exit_intr_ack_set(vcpu) && 3954 exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && 3955 kvm_cpu_has_interrupt(vcpu)) { 3956 int irq = kvm_cpu_get_interrupt(vcpu); 3957 WARN_ON(irq < 0); 3958 vmcs12->vm_exit_intr_info = irq | 3959 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; 3960 } 3961 3962 if (exit_reason != -1) 3963 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, 3964 vmcs12->exit_qualification, 3965 vmcs12->idt_vectoring_info_field, 3966 vmcs12->vm_exit_intr_info, 3967 vmcs12->vm_exit_intr_error_code, 3968 KVM_ISA_VMX); 3969 3970 load_vmcs12_host_state(vcpu, vmcs12); 3971 3972 return; 3973 } 3974 3975 /* 3976 * After an early L2 VM-entry failure, we're now back 3977 * in L1 which thinks it just finished a VMLAUNCH or 3978 * VMRESUME instruction, so we need to set the failure 3979 * flag and the VM-instruction error field of the VMCS 3980 * accordingly, and skip the emulated instruction. 3981 */ 3982 (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3983 3984 /* 3985 * Restore L1's host state to KVM's software model. 
We're here 3986 * because a consistency check was caught by hardware, which 3987 * means some amount of guest state has been propagated to KVM's 3988 * model and needs to be unwound to the host's state. 3989 */ 3990 nested_vmx_restore_host_state(vcpu); 3991 3992 vmx->fail = 0; 3993 } 3994 3995 /* 3996 * Decode the memory-address operand of a vmx instruction, as recorded on an 3997 * exit caused by such an instruction (run by a guest hypervisor). 3998 * On success, returns 0. When the operand is invalid, returns 1 and throws 3999 * #UD or #GP. 4000 */ 4001 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, 4002 u32 vmx_instruction_info, bool wr, gva_t *ret) 4003 { 4004 gva_t off; 4005 bool exn; 4006 struct kvm_segment s; 4007 4008 /* 4009 * According to Vol. 3B, "Information for VM Exits Due to Instruction 4010 * Execution", on an exit, vmx_instruction_info holds most of the 4011 * addressing components of the operand. Only the displacement part 4012 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). 4013 * For how an actual address is calculated from all these components, 4014 * refer to Vol. 1, "Operand Addressing". 4015 */ 4016 int scaling = vmx_instruction_info & 3; 4017 int addr_size = (vmx_instruction_info >> 7) & 7; 4018 bool is_reg = vmx_instruction_info & (1u << 10); 4019 int seg_reg = (vmx_instruction_info >> 15) & 7; 4020 int index_reg = (vmx_instruction_info >> 18) & 0xf; 4021 bool index_is_valid = !(vmx_instruction_info & (1u << 22)); 4022 int base_reg = (vmx_instruction_info >> 23) & 0xf; 4023 bool base_is_valid = !(vmx_instruction_info & (1u << 27)); 4024 4025 if (is_reg) { 4026 kvm_queue_exception(vcpu, UD_VECTOR); 4027 return 1; 4028 } 4029 4030 /* Addr = segment_base + offset */ 4031 /* offset = base + [index * scale] + displacement */ 4032 off = exit_qualification; /* holds the displacement */ 4033 if (base_is_valid) 4034 off += kvm_register_read(vcpu, base_reg); 4035 if (index_is_valid) 4036 off += kvm_register_read(vcpu, index_reg)<<scaling; 4037 vmx_get_segment(vcpu, &s, seg_reg); 4038 *ret = s.base + off; 4039 4040 if (addr_size == 1) /* 32 bit */ 4041 *ret &= 0xffffffff; 4042 4043 /* Checks for #GP/#SS exceptions. */ 4044 exn = false; 4045 if (is_long_mode(vcpu)) { 4046 /* Long mode: #GP(0)/#SS(0) if the memory address is in a 4047 * non-canonical form. This is the only check on the memory 4048 * destination for long mode! 4049 */ 4050 exn = is_noncanonical_address(*ret, vcpu); 4051 } else if (is_protmode(vcpu)) { 4052 /* Protected mode: apply checks for segment validity in the 4053 * following order: 4054 * - segment type check (#GP(0) may be thrown) 4055 * - usability check (#GP(0)/#SS(0)) 4056 * - limit check (#GP(0)/#SS(0)) 4057 */ 4058 if (wr) 4059 /* #GP(0) if the destination operand is located in a 4060 * read-only data segment or any code segment. 4061 */ 4062 exn = ((s.type & 0xa) == 0 || (s.type & 8)); 4063 else 4064 /* #GP(0) if the source operand is located in an 4065 * execute-only code segment 4066 */ 4067 exn = ((s.type & 0xa) == 8); 4068 if (exn) { 4069 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 4070 return 1; 4071 } 4072 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 4073 */ 4074 exn = (s.unusable != 0); 4075 /* Protected mode: #GP(0)/#SS(0) if the memory 4076 * operand is outside the segment limit. 4077 */ 4078 exn = exn || (off + sizeof(u64) > s.limit); 4079 } 4080 if (exn) { 4081 kvm_queue_exception_e(vcpu, 4082 seg_reg == VCPU_SREG_SS ? 
4083 SS_VECTOR : GP_VECTOR, 4084 0); 4085 return 1; 4086 } 4087 4088 return 0; 4089 } 4090 4091 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) 4092 { 4093 gva_t gva; 4094 struct x86_exception e; 4095 4096 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 4097 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) 4098 return 1; 4099 4100 if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) { 4101 kvm_inject_page_fault(vcpu, &e); 4102 return 1; 4103 } 4104 4105 return 0; 4106 } 4107 4108 /* 4109 * Allocate a shadow VMCS and associate it with the currently loaded 4110 * VMCS, unless such a shadow VMCS already exists. The newly allocated 4111 * VMCS is also VMCLEARed, so that it is ready for use. 4112 */ 4113 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) 4114 { 4115 struct vcpu_vmx *vmx = to_vmx(vcpu); 4116 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; 4117 4118 /* 4119 * We should allocate a shadow vmcs for vmcs01 only when L1 4120 * executes VMXON and free it when L1 executes VMXOFF. 4121 * As it is invalid to execute VMXON twice, we shouldn't reach 4122 * here when vmcs01 already have an allocated shadow vmcs. 4123 */ 4124 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); 4125 4126 if (!loaded_vmcs->shadow_vmcs) { 4127 loaded_vmcs->shadow_vmcs = alloc_vmcs(true); 4128 if (loaded_vmcs->shadow_vmcs) 4129 vmcs_clear(loaded_vmcs->shadow_vmcs); 4130 } 4131 return loaded_vmcs->shadow_vmcs; 4132 } 4133 4134 static int enter_vmx_operation(struct kvm_vcpu *vcpu) 4135 { 4136 struct vcpu_vmx *vmx = to_vmx(vcpu); 4137 int r; 4138 4139 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); 4140 if (r < 0) 4141 goto out_vmcs02; 4142 4143 vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); 4144 if (!vmx->nested.cached_vmcs12) 4145 goto out_cached_vmcs12; 4146 4147 vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); 4148 if (!vmx->nested.cached_shadow_vmcs12) 4149 goto out_cached_shadow_vmcs12; 4150 4151 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) 4152 goto out_shadow_vmcs; 4153 4154 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, 4155 HRTIMER_MODE_REL_PINNED); 4156 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 4157 4158 vmx->nested.vpid02 = allocate_vpid(); 4159 4160 vmx->nested.vmcs02_initialized = false; 4161 vmx->nested.vmxon = true; 4162 4163 if (pt_mode == PT_MODE_HOST_GUEST) { 4164 vmx->pt_desc.guest.ctl = 0; 4165 pt_update_intercept_for_msr(vmx); 4166 } 4167 4168 return 0; 4169 4170 out_shadow_vmcs: 4171 kfree(vmx->nested.cached_shadow_vmcs12); 4172 4173 out_cached_shadow_vmcs12: 4174 kfree(vmx->nested.cached_vmcs12); 4175 4176 out_cached_vmcs12: 4177 free_loaded_vmcs(&vmx->nested.vmcs02); 4178 4179 out_vmcs02: 4180 return -ENOMEM; 4181 } 4182 4183 /* 4184 * Emulate the VMXON instruction. 4185 * Currently, we just remember that VMX is active, and do not save or even 4186 * inspect the argument to VMXON (the so-called "VMXON pointer") because we 4187 * do not currently need to store anything in that guest-allocated memory 4188 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their 4189 * argument is different from the VMXON pointer (which the spec says they do). 
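 * We do still check that the VMXON pointer is page aligned, fits within
 * the guest's physical-address width, and that the first 32 bits of the
 * region contain the expected VMCS12_REVISION (see handle_vmon() below).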
4190 */ 4191 static int handle_vmon(struct kvm_vcpu *vcpu) 4192 { 4193 int ret; 4194 gpa_t vmptr; 4195 struct page *page; 4196 struct vcpu_vmx *vmx = to_vmx(vcpu); 4197 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED 4198 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; 4199 4200 /* 4201 * The Intel VMX Instruction Reference lists a bunch of bits that are 4202 * prerequisite to running VMXON, most notably cr4.VMXE must be set to 4203 * 1 (see vmx_set_cr4() for when we allow the guest to set this). 4204 * Otherwise, we should fail with #UD. But most faulting conditions 4205 * have already been checked by hardware, prior to the VM-exit for 4206 * VMXON. We do test guest cr4.VMXE because processor CR4 always has 4207 * that bit set to 1 in non-root mode. 4208 */ 4209 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { 4210 kvm_queue_exception(vcpu, UD_VECTOR); 4211 return 1; 4212 } 4213 4214 /* CPL=0 must be checked manually. */ 4215 if (vmx_get_cpl(vcpu)) { 4216 kvm_inject_gp(vcpu, 0); 4217 return 1; 4218 } 4219 4220 if (vmx->nested.vmxon) 4221 return nested_vmx_failValid(vcpu, 4222 VMXERR_VMXON_IN_VMX_ROOT_OPERATION); 4223 4224 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) 4225 != VMXON_NEEDED_FEATURES) { 4226 kvm_inject_gp(vcpu, 0); 4227 return 1; 4228 } 4229 4230 if (nested_vmx_get_vmptr(vcpu, &vmptr)) 4231 return 1; 4232 4233 /* 4234 * SDM 3: 24.11.5 4235 * The first 4 bytes of VMXON region contain the supported 4236 * VMCS revision identifier 4237 * 4238 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; 4239 * which replaces physical address width with 32 4240 */ 4241 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) 4242 return nested_vmx_failInvalid(vcpu); 4243 4244 page = kvm_vcpu_gpa_to_page(vcpu, vmptr); 4245 if (is_error_page(page)) 4246 return nested_vmx_failInvalid(vcpu); 4247 4248 if (*(u32 *)kmap(page) != VMCS12_REVISION) { 4249 kunmap(page); 4250 kvm_release_page_clean(page); 4251 return nested_vmx_failInvalid(vcpu); 4252 } 4253 kunmap(page); 4254 kvm_release_page_clean(page); 4255 4256 vmx->nested.vmxon_ptr = vmptr; 4257 ret = enter_vmx_operation(vcpu); 4258 if (ret) 4259 return ret; 4260 4261 return nested_vmx_succeed(vcpu); 4262 } 4263 4264 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) 4265 { 4266 struct vcpu_vmx *vmx = to_vmx(vcpu); 4267 4268 if (vmx->nested.current_vmptr == -1ull) 4269 return; 4270 4271 if (enable_shadow_vmcs) { 4272 /* copy to memory all shadowed fields in case 4273 they were modified */ 4274 copy_shadow_to_vmcs12(vmx); 4275 vmx->nested.need_vmcs12_sync = false; 4276 vmx_disable_shadow_vmcs(vmx); 4277 } 4278 vmx->nested.posted_intr_nv = -1; 4279 4280 /* Flush VMCS12 to guest memory */ 4281 kvm_vcpu_write_guest_page(vcpu, 4282 vmx->nested.current_vmptr >> PAGE_SHIFT, 4283 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); 4284 4285 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 4286 4287 vmx->nested.current_vmptr = -1ull; 4288 } 4289 4290 /* Emulate the VMXOFF instruction */ 4291 static int handle_vmoff(struct kvm_vcpu *vcpu) 4292 { 4293 if (!nested_vmx_check_permission(vcpu)) 4294 return 1; 4295 free_nested(vcpu); 4296 return nested_vmx_succeed(vcpu); 4297 } 4298 4299 /* Emulate the VMCLEAR instruction */ 4300 static int handle_vmclear(struct kvm_vcpu *vcpu) 4301 { 4302 struct vcpu_vmx *vmx = to_vmx(vcpu); 4303 u32 zero = 0; 4304 gpa_t vmptr; 4305 4306 if (!nested_vmx_check_permission(vcpu)) 4307 return 1; 4308 4309 if (nested_vmx_get_vmptr(vcpu, &vmptr)) 4310 return 1; 4311 4312 
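/*
 * As with VMPTRLD, the pointer must be page aligned, must fit within
 * the guest's physical-address width, and must not be the VMXON region.
 */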
if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) 4313 return nested_vmx_failValid(vcpu, 4314 VMXERR_VMCLEAR_INVALID_ADDRESS); 4315 4316 if (vmptr == vmx->nested.vmxon_ptr) 4317 return nested_vmx_failValid(vcpu, 4318 VMXERR_VMCLEAR_VMXON_POINTER); 4319 4320 if (vmx->nested.hv_evmcs_page) { 4321 if (vmptr == vmx->nested.hv_evmcs_vmptr) 4322 nested_release_evmcs(vcpu); 4323 } else { 4324 if (vmptr == vmx->nested.current_vmptr) 4325 nested_release_vmcs12(vcpu); 4326 4327 kvm_vcpu_write_guest(vcpu, 4328 vmptr + offsetof(struct vmcs12, 4329 launch_state), 4330 &zero, sizeof(zero)); 4331 } 4332 4333 return nested_vmx_succeed(vcpu); 4334 } 4335 4336 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); 4337 4338 /* Emulate the VMLAUNCH instruction */ 4339 static int handle_vmlaunch(struct kvm_vcpu *vcpu) 4340 { 4341 return nested_vmx_run(vcpu, true); 4342 } 4343 4344 /* Emulate the VMRESUME instruction */ 4345 static int handle_vmresume(struct kvm_vcpu *vcpu) 4346 { 4347 4348 return nested_vmx_run(vcpu, false); 4349 } 4350 4351 static int handle_vmread(struct kvm_vcpu *vcpu) 4352 { 4353 unsigned long field; 4354 u64 field_value; 4355 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4356 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4357 gva_t gva = 0; 4358 struct vmcs12 *vmcs12; 4359 4360 if (!nested_vmx_check_permission(vcpu)) 4361 return 1; 4362 4363 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) 4364 return nested_vmx_failInvalid(vcpu); 4365 4366 if (!is_guest_mode(vcpu)) 4367 vmcs12 = get_vmcs12(vcpu); 4368 else { 4369 /* 4370 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD 4371 * to shadowed-field sets the ALU flags for VMfailInvalid. 4372 */ 4373 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) 4374 return nested_vmx_failInvalid(vcpu); 4375 vmcs12 = get_shadow_vmcs12(vcpu); 4376 } 4377 4378 /* Decode instruction info and find the field to read */ 4379 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 4380 /* Read the field, zero-extended to a u64 field_value */ 4381 if (vmcs12_read_any(vmcs12, field, &field_value) < 0) 4382 return nested_vmx_failValid(vcpu, 4383 VMXERR_UNSUPPORTED_VMCS_COMPONENT); 4384 4385 /* 4386 * Now copy part of this value to register or memory, as requested. 4387 * Note that the number of bits actually copied is 32 or 64 depending 4388 * on the guest's mode (32 or 64 bit), not on the given field's length. 4389 */ 4390 if (vmx_instruction_info & (1u << 10)) { 4391 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), 4392 field_value); 4393 } else { 4394 if (get_vmx_mem_address(vcpu, exit_qualification, 4395 vmx_instruction_info, true, &gva)) 4396 return 1; 4397 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ 4398 kvm_write_guest_virt_system(vcpu, gva, &field_value, 4399 (is_long_mode(vcpu) ? 8 : 4), NULL); 4400 } 4401 4402 return nested_vmx_succeed(vcpu); 4403 } 4404 4405 4406 static int handle_vmwrite(struct kvm_vcpu *vcpu) 4407 { 4408 unsigned long field; 4409 gva_t gva; 4410 struct vcpu_vmx *vmx = to_vmx(vcpu); 4411 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4412 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4413 4414 /* The value to write might be 32 or 64 bits, depending on L1's long 4415 * mode, and eventually we need to write that into a field of several 4416 * possible lengths. 
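 * (Bit 10 of the VM-exit instruction-information field distinguishes a
 * register operand from a memory operand; both paths are handled below.)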
The code below first zero-extends the value to 64 4417 * bit (field_value), and then copies only the appropriate number of 4418 * bits into the vmcs12 field. 4419 */ 4420 u64 field_value = 0; 4421 struct x86_exception e; 4422 struct vmcs12 *vmcs12; 4423 4424 if (!nested_vmx_check_permission(vcpu)) 4425 return 1; 4426 4427 if (vmx->nested.current_vmptr == -1ull) 4428 return nested_vmx_failInvalid(vcpu); 4429 4430 if (vmx_instruction_info & (1u << 10)) 4431 field_value = kvm_register_readl(vcpu, 4432 (((vmx_instruction_info) >> 3) & 0xf)); 4433 else { 4434 if (get_vmx_mem_address(vcpu, exit_qualification, 4435 vmx_instruction_info, false, &gva)) 4436 return 1; 4437 if (kvm_read_guest_virt(vcpu, gva, &field_value, 4438 (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { 4439 kvm_inject_page_fault(vcpu, &e); 4440 return 1; 4441 } 4442 } 4443 4444 4445 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 4446 /* 4447 * If the vCPU supports "VMWRITE to any supported field in the 4448 * VMCS," then the "read-only" fields are actually read/write. 4449 */ 4450 if (vmcs_field_readonly(field) && 4451 !nested_cpu_has_vmwrite_any_field(vcpu)) 4452 return nested_vmx_failValid(vcpu, 4453 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); 4454 4455 if (!is_guest_mode(vcpu)) 4456 vmcs12 = get_vmcs12(vcpu); 4457 else { 4458 /* 4459 * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE 4460 * to shadowed-field sets the ALU flags for VMfailInvalid. 4461 */ 4462 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) 4463 return nested_vmx_failInvalid(vcpu); 4464 vmcs12 = get_shadow_vmcs12(vcpu); 4465 } 4466 4467 if (vmcs12_write_any(vmcs12, field, field_value) < 0) 4468 return nested_vmx_failValid(vcpu, 4469 VMXERR_UNSUPPORTED_VMCS_COMPONENT); 4470 4471 /* 4472 * Do not track vmcs12 dirty-state if in guest-mode 4473 * as we actually dirty shadow vmcs12 instead of vmcs12. 4474 */ 4475 if (!is_guest_mode(vcpu)) { 4476 switch (field) { 4477 #define SHADOW_FIELD_RW(x) case x: 4478 #include "vmcs_shadow_fields.h" 4479 /* 4480 * The fields that can be updated by L1 without a vmexit are 4481 * always updated in the vmcs02, the others go down the slow 4482 * path of prepare_vmcs02. 
4483 */ 4484 break; 4485 default: 4486 vmx->nested.dirty_vmcs12 = true; 4487 break; 4488 } 4489 } 4490 4491 return nested_vmx_succeed(vcpu); 4492 } 4493 4494 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) 4495 { 4496 vmx->nested.current_vmptr = vmptr; 4497 if (enable_shadow_vmcs) { 4498 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, 4499 SECONDARY_EXEC_SHADOW_VMCS); 4500 vmcs_write64(VMCS_LINK_POINTER, 4501 __pa(vmx->vmcs01.shadow_vmcs)); 4502 vmx->nested.need_vmcs12_sync = true; 4503 } 4504 vmx->nested.dirty_vmcs12 = true; 4505 } 4506 4507 /* Emulate the VMPTRLD instruction */ 4508 static int handle_vmptrld(struct kvm_vcpu *vcpu) 4509 { 4510 struct vcpu_vmx *vmx = to_vmx(vcpu); 4511 gpa_t vmptr; 4512 4513 if (!nested_vmx_check_permission(vcpu)) 4514 return 1; 4515 4516 if (nested_vmx_get_vmptr(vcpu, &vmptr)) 4517 return 1; 4518 4519 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) 4520 return nested_vmx_failValid(vcpu, 4521 VMXERR_VMPTRLD_INVALID_ADDRESS); 4522 4523 if (vmptr == vmx->nested.vmxon_ptr) 4524 return nested_vmx_failValid(vcpu, 4525 VMXERR_VMPTRLD_VMXON_POINTER); 4526 4527 /* Forbid normal VMPTRLD if Enlightened version was used */ 4528 if (vmx->nested.hv_evmcs) 4529 return 1; 4530 4531 if (vmx->nested.current_vmptr != vmptr) { 4532 struct vmcs12 *new_vmcs12; 4533 struct page *page; 4534 4535 page = kvm_vcpu_gpa_to_page(vcpu, vmptr); 4536 if (is_error_page(page)) { 4537 /* 4538 * Reads from an unbacked page return all 1s, 4539 * which means that the 32 bits located at the 4540 * given physical address won't match the required 4541 * VMCS12_REVISION identifier. 4542 */ 4543 nested_vmx_failValid(vcpu, 4544 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 4545 return kvm_skip_emulated_instruction(vcpu); 4546 } 4547 new_vmcs12 = kmap(page); 4548 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || 4549 (new_vmcs12->hdr.shadow_vmcs && 4550 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { 4551 kunmap(page); 4552 kvm_release_page_clean(page); 4553 return nested_vmx_failValid(vcpu, 4554 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 4555 } 4556 4557 nested_release_vmcs12(vcpu); 4558 4559 /* 4560 * Load VMCS12 from guest memory since it is not already 4561 * cached. 
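 * The entire VMCS12_SIZE region is copied into cached_vmcs12, so later
 * field accesses go through the cache instead of guest memory.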
4562 */ 4563 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); 4564 kunmap(page); 4565 kvm_release_page_clean(page); 4566 4567 set_current_vmptr(vmx, vmptr); 4568 } 4569 4570 return nested_vmx_succeed(vcpu); 4571 } 4572 4573 /* Emulate the VMPTRST instruction */ 4574 static int handle_vmptrst(struct kvm_vcpu *vcpu) 4575 { 4576 unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION); 4577 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4578 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; 4579 struct x86_exception e; 4580 gva_t gva; 4581 4582 if (!nested_vmx_check_permission(vcpu)) 4583 return 1; 4584 4585 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs)) 4586 return 1; 4587 4588 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva)) 4589 return 1; 4590 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ 4591 if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, 4592 sizeof(gpa_t), &e)) { 4593 kvm_inject_page_fault(vcpu, &e); 4594 return 1; 4595 } 4596 return nested_vmx_succeed(vcpu); 4597 } 4598 4599 /* Emulate the INVEPT instruction */ 4600 static int handle_invept(struct kvm_vcpu *vcpu) 4601 { 4602 struct vcpu_vmx *vmx = to_vmx(vcpu); 4603 u32 vmx_instruction_info, types; 4604 unsigned long type; 4605 gva_t gva; 4606 struct x86_exception e; 4607 struct { 4608 u64 eptp, gpa; 4609 } operand; 4610 4611 if (!(vmx->nested.msrs.secondary_ctls_high & 4612 SECONDARY_EXEC_ENABLE_EPT) || 4613 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { 4614 kvm_queue_exception(vcpu, UD_VECTOR); 4615 return 1; 4616 } 4617 4618 if (!nested_vmx_check_permission(vcpu)) 4619 return 1; 4620 4621 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4622 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 4623 4624 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; 4625 4626 if (type >= 32 || !(types & (1 << type))) 4627 return nested_vmx_failValid(vcpu, 4628 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 4629 4630 /* According to the Intel VMX instruction reference, the memory 4631 * operand is read even if it isn't needed (e.g., for type==global) 4632 */ 4633 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 4634 vmx_instruction_info, false, &gva)) 4635 return 1; 4636 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { 4637 kvm_inject_page_fault(vcpu, &e); 4638 return 1; 4639 } 4640 4641 switch (type) { 4642 case VMX_EPT_EXTENT_GLOBAL: 4643 /* 4644 * TODO: track mappings and invalidate 4645 * single context requests appropriately 4646 */ 4647 case VMX_EPT_EXTENT_CONTEXT: 4648 kvm_mmu_sync_roots(vcpu); 4649 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 4650 break; 4651 default: 4652 BUG_ON(1); 4653 break; 4654 } 4655 4656 return nested_vmx_succeed(vcpu); 4657 } 4658 4659 static int handle_invvpid(struct kvm_vcpu *vcpu) 4660 { 4661 struct vcpu_vmx *vmx = to_vmx(vcpu); 4662 u32 vmx_instruction_info; 4663 unsigned long type, types; 4664 gva_t gva; 4665 struct x86_exception e; 4666 struct { 4667 u64 vpid; 4668 u64 gla; 4669 } operand; 4670 u16 vpid02; 4671 4672 if (!(vmx->nested.msrs.secondary_ctls_high & 4673 SECONDARY_EXEC_ENABLE_VPID) || 4674 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { 4675 kvm_queue_exception(vcpu, UD_VECTOR); 4676 return 1; 4677 } 4678 4679 if (!nested_vmx_check_permission(vcpu)) 4680 return 1; 4681 4682 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4683 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 4684 4685 types = 
(vmx->nested.msrs.vpid_caps & 4686 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; 4687 4688 if (type >= 32 || !(types & (1 << type))) 4689 return nested_vmx_failValid(vcpu, 4690 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 4691 4692 /* according to the intel vmx instruction reference, the memory 4693 * operand is read even if it isn't needed (e.g., for type==global) 4694 */ 4695 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 4696 vmx_instruction_info, false, &gva)) 4697 return 1; 4698 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { 4699 kvm_inject_page_fault(vcpu, &e); 4700 return 1; 4701 } 4702 if (operand.vpid >> 16) 4703 return nested_vmx_failValid(vcpu, 4704 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 4705 4706 vpid02 = nested_get_vpid02(vcpu); 4707 switch (type) { 4708 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: 4709 if (!operand.vpid || 4710 is_noncanonical_address(operand.gla, vcpu)) 4711 return nested_vmx_failValid(vcpu, 4712 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 4713 if (cpu_has_vmx_invvpid_individual_addr()) { 4714 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, 4715 vpid02, operand.gla); 4716 } else 4717 __vmx_flush_tlb(vcpu, vpid02, false); 4718 break; 4719 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 4720 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: 4721 if (!operand.vpid) 4722 return nested_vmx_failValid(vcpu, 4723 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 4724 __vmx_flush_tlb(vcpu, vpid02, false); 4725 break; 4726 case VMX_VPID_EXTENT_ALL_CONTEXT: 4727 __vmx_flush_tlb(vcpu, vpid02, false); 4728 break; 4729 default: 4730 WARN_ON_ONCE(1); 4731 return kvm_skip_emulated_instruction(vcpu); 4732 } 4733 4734 return nested_vmx_succeed(vcpu); 4735 } 4736 4737 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, 4738 struct vmcs12 *vmcs12) 4739 { 4740 u32 index = vcpu->arch.regs[VCPU_REGS_RCX]; 4741 u64 address; 4742 bool accessed_dirty; 4743 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 4744 4745 if (!nested_cpu_has_eptp_switching(vmcs12) || 4746 !nested_cpu_has_ept(vmcs12)) 4747 return 1; 4748 4749 if (index >= VMFUNC_EPTP_ENTRIES) 4750 return 1; 4751 4752 4753 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, 4754 &address, index * 8, 8)) 4755 return 1; 4756 4757 accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT); 4758 4759 /* 4760 * If the (L2) guest does a vmfunc to the currently 4761 * active ept pointer, we don't have to do anything else 4762 */ 4763 if (vmcs12->ept_pointer != address) { 4764 if (!valid_ept_address(vcpu, address)) 4765 return 1; 4766 4767 kvm_mmu_unload(vcpu); 4768 mmu->ept_ad = accessed_dirty; 4769 mmu->mmu_role.base.ad_disabled = !accessed_dirty; 4770 vmcs12->ept_pointer = address; 4771 /* 4772 * TODO: Check what's the correct approach in case 4773 * mmu reload fails. Currently, we just let the next 4774 * reload potentially fail 4775 */ 4776 kvm_mmu_reload(vcpu); 4777 } 4778 4779 return 0; 4780 } 4781 4782 static int handle_vmfunc(struct kvm_vcpu *vcpu) 4783 { 4784 struct vcpu_vmx *vmx = to_vmx(vcpu); 4785 struct vmcs12 *vmcs12; 4786 u32 function = vcpu->arch.regs[VCPU_REGS_RAX]; 4787 4788 /* 4789 * VMFUNC is only supported for nested guests, but we always enable the 4790 * secondary control for simplicity; for non-nested mode, fake that we 4791 * didn't by injecting #UD. 
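 * The only VM function currently emulated is EPTP switching (function 0);
 * any other function, or one not enabled in vm_function_control, is
 * reflected to L1 via the fail: path below.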
4792 */ 4793 if (!is_guest_mode(vcpu)) { 4794 kvm_queue_exception(vcpu, UD_VECTOR); 4795 return 1; 4796 } 4797 4798 vmcs12 = get_vmcs12(vcpu); 4799 if ((vmcs12->vm_function_control & (1 << function)) == 0) 4800 goto fail; 4801 4802 switch (function) { 4803 case 0: 4804 if (nested_vmx_eptp_switching(vcpu, vmcs12)) 4805 goto fail; 4806 break; 4807 default: 4808 goto fail; 4809 } 4810 return kvm_skip_emulated_instruction(vcpu); 4811 4812 fail: 4813 nested_vmx_vmexit(vcpu, vmx->exit_reason, 4814 vmcs_read32(VM_EXIT_INTR_INFO), 4815 vmcs_readl(EXIT_QUALIFICATION)); 4816 return 1; 4817 } 4818 4819 4820 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, 4821 struct vmcs12 *vmcs12) 4822 { 4823 unsigned long exit_qualification; 4824 gpa_t bitmap, last_bitmap; 4825 unsigned int port; 4826 int size; 4827 u8 b; 4828 4829 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 4830 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); 4831 4832 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4833 4834 port = exit_qualification >> 16; 4835 size = (exit_qualification & 7) + 1; 4836 4837 last_bitmap = (gpa_t)-1; 4838 b = -1; 4839 4840 while (size > 0) { 4841 if (port < 0x8000) 4842 bitmap = vmcs12->io_bitmap_a; 4843 else if (port < 0x10000) 4844 bitmap = vmcs12->io_bitmap_b; 4845 else 4846 return true; 4847 bitmap += (port & 0x7fff) / 8; 4848 4849 if (last_bitmap != bitmap) 4850 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) 4851 return true; 4852 if (b & (1 << (port & 7))) 4853 return true; 4854 4855 port++; 4856 size--; 4857 last_bitmap = bitmap; 4858 } 4859 4860 return false; 4861 } 4862 4863 /* 4864 * Return 1 if we should exit from L2 to L1 to handle an MSR access access, 4865 * rather than handle it ourselves in L0. I.e., check whether L1 expressed 4866 * disinterest in the current event (read or write a specific MSR) by using an 4867 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 4868 */ 4869 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, 4870 struct vmcs12 *vmcs12, u32 exit_reason) 4871 { 4872 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; 4873 gpa_t bitmap; 4874 4875 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 4876 return true; 4877 4878 /* 4879 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, 4880 * for the four combinations of read/write and low/high MSR numbers. 4881 * First we need to figure out which of the four to use: 4882 */ 4883 bitmap = vmcs12->msr_bitmap; 4884 if (exit_reason == EXIT_REASON_MSR_WRITE) 4885 bitmap += 2048; 4886 if (msr_index >= 0xc0000000) { 4887 msr_index -= 0xc0000000; 4888 bitmap += 1024; 4889 } 4890 4891 /* Then read the msr_index'th bit from this bitmap: */ 4892 if (msr_index < 1024*8) { 4893 unsigned char b; 4894 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) 4895 return true; 4896 return 1 & (b >> (msr_index & 7)); 4897 } else 4898 return true; /* let L1 handle the wrong parameter */ 4899 } 4900 4901 /* 4902 * Return 1 if we should exit from L2 to L1 to handle a CR access exit, 4903 * rather than handle it ourselves in L0. I.e., check if L1 wanted to 4904 * intercept (via guest_host_mask etc.) the current event. 
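 * For example, if L1 owns CR0.TS (the TS bit is set in
 * cr0_guest_host_mask) and L2 writes a value whose TS bit differs from
 * cr0_read_shadow, the access must be reflected to L1.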
4905 */ 4906 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, 4907 struct vmcs12 *vmcs12) 4908 { 4909 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4910 int cr = exit_qualification & 15; 4911 int reg; 4912 unsigned long val; 4913 4914 switch ((exit_qualification >> 4) & 3) { 4915 case 0: /* mov to cr */ 4916 reg = (exit_qualification >> 8) & 15; 4917 val = kvm_register_readl(vcpu, reg); 4918 switch (cr) { 4919 case 0: 4920 if (vmcs12->cr0_guest_host_mask & 4921 (val ^ vmcs12->cr0_read_shadow)) 4922 return true; 4923 break; 4924 case 3: 4925 if ((vmcs12->cr3_target_count >= 1 && 4926 vmcs12->cr3_target_value0 == val) || 4927 (vmcs12->cr3_target_count >= 2 && 4928 vmcs12->cr3_target_value1 == val) || 4929 (vmcs12->cr3_target_count >= 3 && 4930 vmcs12->cr3_target_value2 == val) || 4931 (vmcs12->cr3_target_count >= 4 && 4932 vmcs12->cr3_target_value3 == val)) 4933 return false; 4934 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) 4935 return true; 4936 break; 4937 case 4: 4938 if (vmcs12->cr4_guest_host_mask & 4939 (vmcs12->cr4_read_shadow ^ val)) 4940 return true; 4941 break; 4942 case 8: 4943 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) 4944 return true; 4945 break; 4946 } 4947 break; 4948 case 2: /* clts */ 4949 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && 4950 (vmcs12->cr0_read_shadow & X86_CR0_TS)) 4951 return true; 4952 break; 4953 case 1: /* mov from cr */ 4954 switch (cr) { 4955 case 3: 4956 if (vmcs12->cpu_based_vm_exec_control & 4957 CPU_BASED_CR3_STORE_EXITING) 4958 return true; 4959 break; 4960 case 8: 4961 if (vmcs12->cpu_based_vm_exec_control & 4962 CPU_BASED_CR8_STORE_EXITING) 4963 return true; 4964 break; 4965 } 4966 break; 4967 case 3: /* lmsw */ 4968 /* 4969 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 4970 * cr0. Other attempted changes are ignored, with no exit. 4971 */ 4972 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 4973 if (vmcs12->cr0_guest_host_mask & 0xe & 4974 (val ^ vmcs12->cr0_read_shadow)) 4975 return true; 4976 if ((vmcs12->cr0_guest_host_mask & 0x1) && 4977 !(vmcs12->cr0_read_shadow & 0x1) && 4978 (val & 0x1)) 4979 return true; 4980 break; 4981 } 4982 return false; 4983 } 4984 4985 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, 4986 struct vmcs12 *vmcs12, gpa_t bitmap) 4987 { 4988 u32 vmx_instruction_info; 4989 unsigned long field; 4990 u8 b; 4991 4992 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 4993 return true; 4994 4995 /* Decode instruction info and find the field to access */ 4996 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4997 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 4998 4999 /* Out-of-range fields always cause a VM exit from L2 to L1 */ 5000 if (field >> 15) 5001 return true; 5002 5003 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) 5004 return true; 5005 5006 return 1 & (b >> (field & 7)); 5007 } 5008 5009 /* 5010 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we 5011 * should handle it ourselves in L0 (and then continue L2). Only call this 5012 * when in is_guest_mode (L2). 
5013 */ 5014 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) 5015 { 5016 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 5017 struct vcpu_vmx *vmx = to_vmx(vcpu); 5018 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5019 5020 if (vmx->nested.nested_run_pending) 5021 return false; 5022 5023 if (unlikely(vmx->fail)) { 5024 pr_info_ratelimited("%s failed vm entry %x\n", __func__, 5025 vmcs_read32(VM_INSTRUCTION_ERROR)); 5026 return true; 5027 } 5028 5029 /* 5030 * The host physical addresses of some pages of guest memory 5031 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC 5032 * Page). The CPU may write to these pages via their host 5033 * physical address while L2 is running, bypassing any 5034 * address-translation-based dirty tracking (e.g. EPT write 5035 * protection). 5036 * 5037 * Mark them dirty on every exit from L2 to prevent them from 5038 * getting out of sync with dirty tracking. 5039 */ 5040 nested_mark_vmcs12_pages_dirty(vcpu); 5041 5042 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, 5043 vmcs_readl(EXIT_QUALIFICATION), 5044 vmx->idt_vectoring_info, 5045 intr_info, 5046 vmcs_read32(VM_EXIT_INTR_ERROR_CODE), 5047 KVM_ISA_VMX); 5048 5049 switch (exit_reason) { 5050 case EXIT_REASON_EXCEPTION_NMI: 5051 if (is_nmi(intr_info)) 5052 return false; 5053 else if (is_page_fault(intr_info)) 5054 return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; 5055 else if (is_debug(intr_info) && 5056 vcpu->guest_debug & 5057 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 5058 return false; 5059 else if (is_breakpoint(intr_info) && 5060 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 5061 return false; 5062 return vmcs12->exception_bitmap & 5063 (1u << (intr_info & INTR_INFO_VECTOR_MASK)); 5064 case EXIT_REASON_EXTERNAL_INTERRUPT: 5065 return false; 5066 case EXIT_REASON_TRIPLE_FAULT: 5067 return true; 5068 case EXIT_REASON_PENDING_INTERRUPT: 5069 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); 5070 case EXIT_REASON_NMI_WINDOW: 5071 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); 5072 case EXIT_REASON_TASK_SWITCH: 5073 return true; 5074 case EXIT_REASON_CPUID: 5075 return true; 5076 case EXIT_REASON_HLT: 5077 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); 5078 case EXIT_REASON_INVD: 5079 return true; 5080 case EXIT_REASON_INVLPG: 5081 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 5082 case EXIT_REASON_RDPMC: 5083 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); 5084 case EXIT_REASON_RDRAND: 5085 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); 5086 case EXIT_REASON_RDSEED: 5087 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); 5088 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: 5089 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); 5090 case EXIT_REASON_VMREAD: 5091 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5092 vmcs12->vmread_bitmap); 5093 case EXIT_REASON_VMWRITE: 5094 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5095 vmcs12->vmwrite_bitmap); 5096 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: 5097 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: 5098 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: 5099 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 5100 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: 5101 /* 5102 * VMX instructions trap unconditionally. This allows L1 to 5103 * emulate them for its L2 guest, i.e., allows 3-level nesting! 
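 * (VMREAD and VMWRITE are the partial exception: when shadow VMCS is in
 * use they are reflected to L1 based on vmcs12's VMREAD/VMWRITE bitmaps,
 * handled above.)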
5104 */ 5105 return true; 5106 case EXIT_REASON_CR_ACCESS: 5107 return nested_vmx_exit_handled_cr(vcpu, vmcs12); 5108 case EXIT_REASON_DR_ACCESS: 5109 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); 5110 case EXIT_REASON_IO_INSTRUCTION: 5111 return nested_vmx_exit_handled_io(vcpu, vmcs12); 5112 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: 5113 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); 5114 case EXIT_REASON_MSR_READ: 5115 case EXIT_REASON_MSR_WRITE: 5116 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); 5117 case EXIT_REASON_INVALID_STATE: 5118 return true; 5119 case EXIT_REASON_MWAIT_INSTRUCTION: 5120 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); 5121 case EXIT_REASON_MONITOR_TRAP_FLAG: 5122 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG); 5123 case EXIT_REASON_MONITOR_INSTRUCTION: 5124 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); 5125 case EXIT_REASON_PAUSE_INSTRUCTION: 5126 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || 5127 nested_cpu_has2(vmcs12, 5128 SECONDARY_EXEC_PAUSE_LOOP_EXITING); 5129 case EXIT_REASON_MCE_DURING_VMENTRY: 5130 return false; 5131 case EXIT_REASON_TPR_BELOW_THRESHOLD: 5132 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); 5133 case EXIT_REASON_APIC_ACCESS: 5134 case EXIT_REASON_APIC_WRITE: 5135 case EXIT_REASON_EOI_INDUCED: 5136 /* 5137 * The controls for "virtualize APIC accesses," "APIC- 5138 * register virtualization," and "virtual-interrupt 5139 * delivery" only come from vmcs12. 5140 */ 5141 return true; 5142 case EXIT_REASON_EPT_VIOLATION: 5143 /* 5144 * L0 always deals with the EPT violation. If nested EPT is 5145 * used, and the nested mmu code discovers that the address is 5146 * missing in the guest EPT table (EPT12), the EPT violation 5147 * will be injected with nested_ept_inject_page_fault() 5148 */ 5149 return false; 5150 case EXIT_REASON_EPT_MISCONFIG: 5151 /* 5152 * L2 never directly uses L1's EPT, but rather L0's own EPT 5153 * table (shadow on EPT) or a merged EPT table that L0 built 5154 * (EPT on EPT). So any problems with the structure of the 5155 * table are L0's fault. 5156 */ 5157 return false; 5158 case EXIT_REASON_INVPCID: 5159 return 5160 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && 5161 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 5162 case EXIT_REASON_WBINVD: 5163 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); 5164 case EXIT_REASON_XSETBV: 5165 return true; 5166 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: 5167 /* 5168 * This should never happen, since it is not possible to 5169 * set XSS to a non-zero value---neither in L1 nor in L2. 5170 * If it were, XSS would have to be checked against 5171 * the XSS exit bitmap in vmcs12. 5172 */ 5173 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); 5174 case EXIT_REASON_PREEMPTION_TIMER: 5175 return false; 5176 case EXIT_REASON_PML_FULL: 5177 /* We emulate PML support to L1. */ 5178 return false; 5179 case EXIT_REASON_VMFUNC: 5180 /* VM functions are emulated through L2->L0 vmexits.
*/ 5181 return false; 5182 case EXIT_REASON_ENCLS: 5183 /* SGX is never exposed to L1 */ 5184 return false; 5185 default: 5186 return true; 5187 } 5188 } 5189 5190 5191 static int vmx_get_nested_state(struct kvm_vcpu *vcpu, 5192 struct kvm_nested_state __user *user_kvm_nested_state, 5193 u32 user_data_size) 5194 { 5195 struct vcpu_vmx *vmx; 5196 struct vmcs12 *vmcs12; 5197 struct kvm_nested_state kvm_state = { 5198 .flags = 0, 5199 .format = 0, 5200 .size = sizeof(kvm_state), 5201 .vmx.vmxon_pa = -1ull, 5202 .vmx.vmcs_pa = -1ull, 5203 }; 5204 5205 if (!vcpu) 5206 return kvm_state.size + 2 * VMCS12_SIZE; 5207 5208 vmx = to_vmx(vcpu); 5209 vmcs12 = get_vmcs12(vcpu); 5210 5211 if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled) 5212 kvm_state.flags |= KVM_STATE_NESTED_EVMCS; 5213 5214 if (nested_vmx_allowed(vcpu) && 5215 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { 5216 kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr; 5217 kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr; 5218 5219 if (vmx_has_valid_vmcs12(vcpu)) { 5220 kvm_state.size += VMCS12_SIZE; 5221 5222 if (is_guest_mode(vcpu) && 5223 nested_cpu_has_shadow_vmcs(vmcs12) && 5224 vmcs12->vmcs_link_pointer != -1ull) 5225 kvm_state.size += VMCS12_SIZE; 5226 } 5227 5228 if (vmx->nested.smm.vmxon) 5229 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; 5230 5231 if (vmx->nested.smm.guest_mode) 5232 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; 5233 5234 if (is_guest_mode(vcpu)) { 5235 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; 5236 5237 if (vmx->nested.nested_run_pending) 5238 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; 5239 } 5240 } 5241 5242 if (user_data_size < kvm_state.size) 5243 goto out; 5244 5245 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) 5246 return -EFAULT; 5247 5248 if (!vmx_has_valid_vmcs12(vcpu)) 5249 goto out; 5250 5251 /* 5252 * When running L2, the authoritative vmcs12 state is in the 5253 * vmcs02. When running L1, the authoritative vmcs12 state is 5254 * in the shadow or enlightened vmcs linked to vmcs01, unless 5255 * need_vmcs12_sync is set, in which case, the authoritative 5256 * vmcs12 state is in the vmcs12 already. 5257 */ 5258 if (is_guest_mode(vcpu)) { 5259 sync_vmcs12(vcpu, vmcs12); 5260 } else if (!vmx->nested.need_vmcs12_sync) { 5261 if (vmx->nested.hv_evmcs) 5262 copy_enlightened_to_vmcs12(vmx); 5263 else if (enable_shadow_vmcs) 5264 copy_shadow_to_vmcs12(vmx); 5265 } 5266 5267 if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) 5268 return -EFAULT; 5269 5270 if (nested_cpu_has_shadow_vmcs(vmcs12) && 5271 vmcs12->vmcs_link_pointer != -1ull) { 5272 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, 5273 get_shadow_vmcs12(vcpu), sizeof(*vmcs12))) 5274 return -EFAULT; 5275 } 5276 5277 out: 5278 return kvm_state.size; 5279 } 5280 5281 /* 5282 * Forcibly leave nested mode in order to be able to reset the VCPU later on. 
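 * In practice (a sketch of the intent, see the -1 exit reason passed to
 * nested_vmx_vmexit() below): the exit is synthetic, so the vmcs12
 * exit-information bookkeeping that a genuine reflected exit would do
 * is skipped; all we want is to get back onto vmcs01 so the vCPU can
 * be reset or have its nested state replaced.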
5283 */ 5284 void vmx_leave_nested(struct kvm_vcpu *vcpu) 5285 { 5286 if (is_guest_mode(vcpu)) { 5287 to_vmx(vcpu)->nested.nested_run_pending = 0; 5288 nested_vmx_vmexit(vcpu, -1, 0, 0); 5289 } 5290 free_nested(vcpu); 5291 } 5292 5293 static int vmx_set_nested_state(struct kvm_vcpu *vcpu, 5294 struct kvm_nested_state __user *user_kvm_nested_state, 5295 struct kvm_nested_state *kvm_state) 5296 { 5297 struct vcpu_vmx *vmx = to_vmx(vcpu); 5298 struct vmcs12 *vmcs12; 5299 u32 exit_qual; 5300 int ret; 5301 5302 if (kvm_state->format != 0) 5303 return -EINVAL; 5304 5305 if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) 5306 nested_enable_evmcs(vcpu, NULL); 5307 5308 if (!nested_vmx_allowed(vcpu)) 5309 return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL; 5310 5311 if (kvm_state->vmx.vmxon_pa == -1ull) { 5312 if (kvm_state->vmx.smm.flags) 5313 return -EINVAL; 5314 5315 if (kvm_state->vmx.vmcs_pa != -1ull) 5316 return -EINVAL; 5317 5318 vmx_leave_nested(vcpu); 5319 return 0; 5320 } 5321 5322 if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa)) 5323 return -EINVAL; 5324 5325 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 5326 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 5327 return -EINVAL; 5328 5329 if (kvm_state->vmx.smm.flags & 5330 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) 5331 return -EINVAL; 5332 5333 /* 5334 * SMM temporarily disables VMX, so we cannot be in guest mode, 5335 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags 5336 * must be zero. 5337 */ 5338 if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags) 5339 return -EINVAL; 5340 5341 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 5342 !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) 5343 return -EINVAL; 5344 5345 vmx_leave_nested(vcpu); 5346 if (kvm_state->vmx.vmxon_pa == -1ull) 5347 return 0; 5348 5349 vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa; 5350 ret = enter_vmx_operation(vcpu); 5351 if (ret) 5352 return ret; 5353 5354 /* Empty 'VMXON' state is permitted */ 5355 if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) 5356 return 0; 5357 5358 if (kvm_state->vmx.vmcs_pa != -1ull) { 5359 if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || 5360 !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) 5361 return -EINVAL; 5362 5363 set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); 5364 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { 5365 /* 5366 * Sync eVMCS upon entry as we may not have 5367 * HV_X64_MSR_VP_ASSIST_PAGE set up yet. 
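 * (Roughly: the vmcs12 image copied in from userspace below is treated
 * as authoritative for now, and need_vmcs12_sync ensures it is written
 * back out to the enlightened VMCS once the guest's assist page can
 * actually be mapped.)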
5368 */ 5369 vmx->nested.need_vmcs12_sync = true; 5370 } else { 5371 return -EINVAL; 5372 } 5373 5374 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { 5375 vmx->nested.smm.vmxon = true; 5376 vmx->nested.vmxon = false; 5377 5378 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) 5379 vmx->nested.smm.guest_mode = true; 5380 } 5381 5382 vmcs12 = get_vmcs12(vcpu); 5383 if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12))) 5384 return -EFAULT; 5385 5386 if (vmcs12->hdr.revision_id != VMCS12_REVISION) 5387 return -EINVAL; 5388 5389 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 5390 return 0; 5391 5392 vmx->nested.nested_run_pending = 5393 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); 5394 5395 if (nested_cpu_has_shadow_vmcs(vmcs12) && 5396 vmcs12->vmcs_link_pointer != -1ull) { 5397 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 5398 5399 if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12)) 5400 return -EINVAL; 5401 5402 if (copy_from_user(shadow_vmcs12, 5403 user_kvm_nested_state->data + VMCS12_SIZE, 5404 sizeof(*vmcs12))) 5405 return -EFAULT; 5406 5407 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 5408 !shadow_vmcs12->hdr.shadow_vmcs) 5409 return -EINVAL; 5410 } 5411 5412 if (nested_vmx_check_vmentry_prereqs(vcpu, vmcs12) || 5413 nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) 5414 return -EINVAL; 5415 5416 vmx->nested.dirty_vmcs12 = true; 5417 ret = nested_vmx_enter_non_root_mode(vcpu, false); 5418 if (ret) 5419 return -EINVAL; 5420 5421 return 0; 5422 } 5423 5424 void nested_vmx_vcpu_setup(void) 5425 { 5426 if (enable_shadow_vmcs) { 5427 /* 5428 * At vCPU creation, "VMWRITE to any supported field 5429 * in the VMCS" is supported, so use the more 5430 * permissive vmx_vmread_bitmap to specify both read 5431 * and write permissions for the shadow VMCS. 5432 */ 5433 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); 5434 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap)); 5435 } 5436 } 5437 5438 /* 5439 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be 5440 * returned for the various VMX controls MSRs when nested VMX is enabled. 5441 * The same values should also be used to verify that vmcs12 control fields are 5442 * valid during nested entry from L1 to L2. 5443 * Each of these control msrs has a low and high 32-bit half: A low bit is on 5444 * if the corresponding bit in the (32-bit) control field *must* be on, and a 5445 * bit in the high half is on if the corresponding bit in the control field 5446 * may be on. See also vmx_control_verify(). 5447 */ 5448 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, 5449 bool apicv) 5450 { 5451 /* 5452 * Note that as a general rule, the high half of the MSRs (bits in 5453 * the control fields which may be 1) should be initialized by the 5454 * intersection of the underlying hardware's MSR (i.e., features which 5455 * can be supported) and the list of features we want to expose - 5456 * because they are known to be properly supported in our code. 5457 * Also, usually, the low half of the MSRs (bits which must be 1) can 5458 * be set to 0, meaning that L1 may turn off any of these bits. The 5459 * reason is that if one of these bits is necessary, it will appear 5460 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control 5461 * fields of vmcs01 and vmcs02, will turn these bits off - and 5462 * nested_vmx_exit_reflected() will not pass related exits to L1. 
5463 * These rules have exceptions below. 5464 */ 5465 5466 /* pin-based controls */ 5467 rdmsr(MSR_IA32_VMX_PINBASED_CTLS, 5468 msrs->pinbased_ctls_low, 5469 msrs->pinbased_ctls_high); 5470 msrs->pinbased_ctls_low |= 5471 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 5472 msrs->pinbased_ctls_high &= 5473 PIN_BASED_EXT_INTR_MASK | 5474 PIN_BASED_NMI_EXITING | 5475 PIN_BASED_VIRTUAL_NMIS | 5476 (apicv ? PIN_BASED_POSTED_INTR : 0); 5477 msrs->pinbased_ctls_high |= 5478 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 5479 PIN_BASED_VMX_PREEMPTION_TIMER; 5480 5481 /* exit controls */ 5482 rdmsr(MSR_IA32_VMX_EXIT_CTLS, 5483 msrs->exit_ctls_low, 5484 msrs->exit_ctls_high); 5485 msrs->exit_ctls_low = 5486 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 5487 5488 msrs->exit_ctls_high &= 5489 #ifdef CONFIG_X86_64 5490 VM_EXIT_HOST_ADDR_SPACE_SIZE | 5491 #endif 5492 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; 5493 msrs->exit_ctls_high |= 5494 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | 5495 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 5496 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; 5497 5498 /* We support free control of debug control saving. */ 5499 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; 5500 5501 /* entry controls */ 5502 rdmsr(MSR_IA32_VMX_ENTRY_CTLS, 5503 msrs->entry_ctls_low, 5504 msrs->entry_ctls_high); 5505 msrs->entry_ctls_low = 5506 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 5507 msrs->entry_ctls_high &= 5508 #ifdef CONFIG_X86_64 5509 VM_ENTRY_IA32E_MODE | 5510 #endif 5511 VM_ENTRY_LOAD_IA32_PAT; 5512 msrs->entry_ctls_high |= 5513 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); 5514 5515 /* We support free control of debug control loading. */ 5516 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; 5517 5518 /* cpu-based controls */ 5519 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, 5520 msrs->procbased_ctls_low, 5521 msrs->procbased_ctls_high); 5522 msrs->procbased_ctls_low = 5523 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 5524 msrs->procbased_ctls_high &= 5525 CPU_BASED_VIRTUAL_INTR_PENDING | 5526 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | 5527 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | 5528 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | 5529 CPU_BASED_CR3_STORE_EXITING | 5530 #ifdef CONFIG_X86_64 5531 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | 5532 #endif 5533 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | 5534 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | 5535 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | 5536 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | 5537 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 5538 /* 5539 * We can allow some features even when not supported by the 5540 * hardware. For example, L1 can specify an MSR bitmap - and we 5541 * can use it to avoid exits to L1 - even when L0 runs L2 5542 * without MSR bitmaps. 5543 */ 5544 msrs->procbased_ctls_high |= 5545 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 5546 CPU_BASED_USE_MSR_BITMAPS; 5547 5548 /* We support free control of CR3 access interception. */ 5549 msrs->procbased_ctls_low &= 5550 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); 5551 5552 /* 5553 * secondary cpu-based controls. Do not include those that 5554 * depend on CPUID bits, they are added later by vmx_cpuid_update. 
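 * (Typical examples of such CPUID-gated bits - an illustration, not an
 * exhaustive list - are the RDTSCP and INVPCID enables, which are only
 * advertised to L1 once the matching bits show up in the guest's
 * CPUID.)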
5555 */ 5556 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 5557 msrs->secondary_ctls_low, 5558 msrs->secondary_ctls_high); 5559 msrs->secondary_ctls_low = 0; 5560 msrs->secondary_ctls_high &= 5561 SECONDARY_EXEC_DESC | 5562 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 5563 SECONDARY_EXEC_APIC_REGISTER_VIRT | 5564 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 5565 SECONDARY_EXEC_WBINVD_EXITING; 5566 5567 /* 5568 * We can emulate "VMCS shadowing," even if the hardware 5569 * doesn't support it. 5570 */ 5571 msrs->secondary_ctls_high |= 5572 SECONDARY_EXEC_SHADOW_VMCS; 5573 5574 if (enable_ept) { 5575 /* nested EPT: emulate EPT also to L1 */ 5576 msrs->secondary_ctls_high |= 5577 SECONDARY_EXEC_ENABLE_EPT; 5578 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT | 5579 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT; 5580 if (cpu_has_vmx_ept_execute_only()) 5581 msrs->ept_caps |= 5582 VMX_EPT_EXECUTE_ONLY_BIT; 5583 msrs->ept_caps &= ept_caps; 5584 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | 5585 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | 5586 VMX_EPT_1GB_PAGE_BIT; 5587 if (enable_ept_ad_bits) { 5588 msrs->secondary_ctls_high |= 5589 SECONDARY_EXEC_ENABLE_PML; 5590 msrs->ept_caps |= VMX_EPT_AD_BIT; 5591 } 5592 } 5593 5594 if (cpu_has_vmx_vmfunc()) { 5595 msrs->secondary_ctls_high |= 5596 SECONDARY_EXEC_ENABLE_VMFUNC; 5597 /* 5598 * Advertise EPTP switching unconditionally 5599 * since we emulate it 5600 */ 5601 if (enable_ept) 5602 msrs->vmfunc_controls = 5603 VMX_VMFUNC_EPTP_SWITCHING; 5604 } 5605 5606 /* 5607 * Old versions of KVM use the single-context version without 5608 * checking for support, so declare that it is supported even 5609 * though it is treated as global context. The alternative is 5610 * not failing the single-context invvpid, and it is worse. 5611 */ 5612 if (enable_vpid) { 5613 msrs->secondary_ctls_high |= 5614 SECONDARY_EXEC_ENABLE_VPID; 5615 msrs->vpid_caps = VMX_VPID_INVVPID_BIT | 5616 VMX_VPID_EXTENT_SUPPORTED_MASK; 5617 } 5618 5619 if (enable_unrestricted_guest) 5620 msrs->secondary_ctls_high |= 5621 SECONDARY_EXEC_UNRESTRICTED_GUEST; 5622 5623 if (flexpriority_enabled) 5624 msrs->secondary_ctls_high |= 5625 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 5626 5627 /* miscellaneous data */ 5628 rdmsr(MSR_IA32_VMX_MISC, 5629 msrs->misc_low, 5630 msrs->misc_high); 5631 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA; 5632 msrs->misc_low |= 5633 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | 5634 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | 5635 VMX_MISC_ACTIVITY_HLT; 5636 msrs->misc_high = 0; 5637 5638 /* 5639 * This MSR reports some information about VMX support. We 5640 * should return information about the VMX we emulate for the 5641 * guest, and the VMCS structure we give it - not about the 5642 * VMX support of the underlying hardware. 5643 */ 5644 msrs->basic = 5645 VMCS12_REVISION | 5646 VMX_BASIC_TRUE_CTLS | 5647 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | 5648 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); 5649 5650 if (cpu_has_vmx_basic_inout()) 5651 msrs->basic |= VMX_BASIC_INOUT; 5652 5653 /* 5654 * These MSRs specify bits which the guest must keep fixed on 5655 * while L1 is in VMXON mode (in L1's root mode, or running an L2). 5656 * We picked the standard core2 setting. 5657 */ 5658 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) 5659 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE 5660 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; 5661 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; 5662 5663 /* These MSRs specify bits which the guest must keep fixed off. 
*/ 5664 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); 5665 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); 5666 5667 /* highest index: VMX_PREEMPTION_TIMER_VALUE */ 5668 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1; 5669 } 5670 5671 void nested_vmx_hardware_unsetup(void) 5672 { 5673 int i; 5674 5675 if (enable_shadow_vmcs) { 5676 for (i = 0; i < VMX_BITMAP_NR; i++) 5677 free_page((unsigned long)vmx_bitmap[i]); 5678 } 5679 } 5680 5681 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) 5682 { 5683 int i; 5684 5685 if (!cpu_has_vmx_shadow_vmcs()) 5686 enable_shadow_vmcs = 0; 5687 if (enable_shadow_vmcs) { 5688 for (i = 0; i < VMX_BITMAP_NR; i++) { 5689 vmx_bitmap[i] = (unsigned long *) 5690 __get_free_page(GFP_KERNEL); 5691 if (!vmx_bitmap[i]) { 5692 nested_vmx_hardware_unsetup(); 5693 return -ENOMEM; 5694 } 5695 } 5696 5697 init_vmcs_shadow_fields(); 5698 } 5699 5700 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear; 5701 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch; 5702 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld; 5703 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst; 5704 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread; 5705 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume; 5706 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite; 5707 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff; 5708 exit_handlers[EXIT_REASON_VMON] = handle_vmon; 5709 exit_handlers[EXIT_REASON_INVEPT] = handle_invept; 5710 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid; 5711 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc; 5712 5713 kvm_x86_ops->check_nested_events = vmx_check_nested_events; 5714 kvm_x86_ops->get_nested_state = vmx_get_nested_state; 5715 kvm_x86_ops->set_nested_state = vmx_set_nested_state; 5716 kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages; 5717 kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs; 5718 kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version; 5719 5720 return 0; 5721 } 5722
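/*
 * Usage sketch, assuming the caller is vmx.c's hardware_setup(); the names
 * vmcs_config, vmx_capability and kvm_vmx_exit_handlers are taken from there
 * and may differ across versions:
 *
 *	if (nested) {
 *		nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
 *					   vmx_capability.ept, enable_apicv);
 *		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
 *		if (r)
 *			return r;
 *	}
 *
 * i.e. the nested control MSR values are computed once at module load and
 * the nested exit handlers and state callbacks are patched into the generic
 * tables.
 */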